Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 6692, column 22
The right operand of '==' is a garbage value (i.e. it is read while uninitialized).

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AArch64/AsmParser -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AArch64/AsmParser/.. 
-I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/.. -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64InstPrinter.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "TargetInfo/AArch64TargetInfo.h"
15#include "AArch64InstrInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCLinkerOptimizationHint.h"
32#include "llvm/MC/MCObjectFileInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37#include "llvm/MC/MCParser/MCTargetAsmParser.h"
38#include "llvm/MC/MCRegisterInfo.h"
39#include "llvm/MC/MCStreamer.h"
40#include "llvm/MC/MCSubtargetInfo.h"
41#include "llvm/MC/MCSymbol.h"
42#include "llvm/MC/MCTargetOptions.h"
43#include "llvm/MC/SubtargetFeature.h"
44#include "llvm/MC/MCValue.h"
45#include "llvm/Support/Casting.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/ErrorHandling.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Support/SMLoc.h"
50#include "llvm/Support/TargetParser.h"
51#include "llvm/Support/TargetRegistry.h"
52#include "llvm/Support/raw_ostream.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <string>
58#include <tuple>
59#include <utility>
60#include <vector>
61
62using namespace llvm;
63
64namespace {
65
// Categories of register operand that the parser distinguishes while
// matching register names and vector-kind suffixes.
enum class RegKind {
  Scalar,             // scalar (non-vector) registers
  NeonVector,         // NEON vector registers
  SVEDataVector,      // SVE data-vector registers
  SVEPredicateVector, // SVE predicate registers
  Matrix              // SME matrix registers
};
73
// Granularity of a parsed matrix operand: the whole array, a tile,
// or a row/column selection (see tryParseMatrixRegister).
enum class MatrixKind {
  Array,
  Tile,
  Row,
  Col
};
75
// How a parsed register is allowed to relate to the register class the
// matcher expects: exactly that register, or its super-/sub-register
// (e.g. for GPR64as32 / GPR32as64 operands; see RegOp::EqualityTy).
enum RegConstraintEqualityTy {
  EqualsReg,      // must match the expected register exactly
  EqualsSuperReg, // the super-register of the expected one is accepted
  EqualsSubReg    // the sub-register of the expected one is accepted
};
81
82class AArch64AsmParser : public MCTargetAsmParser {
83private:
84 StringRef Mnemonic; ///< Instruction mnemonic.
85
86 // Map of register aliases registers via the .req directive.
87 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
88
89 class PrefixInfo {
90 public:
91 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
92 PrefixInfo Prefix;
93 switch (Inst.getOpcode()) {
94 case AArch64::MOVPRFX_ZZ:
95 Prefix.Active = true;
96 Prefix.Dst = Inst.getOperand(0).getReg();
97 break;
98 case AArch64::MOVPRFX_ZPmZ_B:
99 case AArch64::MOVPRFX_ZPmZ_H:
100 case AArch64::MOVPRFX_ZPmZ_S:
101 case AArch64::MOVPRFX_ZPmZ_D:
102 Prefix.Active = true;
103 Prefix.Predicated = true;
104 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
105 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 106, __extension__ __PRETTY_FUNCTION__))
106 "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 106, __extension__ __PRETTY_FUNCTION__))
;
107 Prefix.Dst = Inst.getOperand(0).getReg();
108 Prefix.Pg = Inst.getOperand(2).getReg();
109 break;
110 case AArch64::MOVPRFX_ZPzZ_B:
111 case AArch64::MOVPRFX_ZPzZ_H:
112 case AArch64::MOVPRFX_ZPzZ_S:
113 case AArch64::MOVPRFX_ZPzZ_D:
114 Prefix.Active = true;
115 Prefix.Predicated = true;
116 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
117 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 118, __extension__ __PRETTY_FUNCTION__))
118 "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 118, __extension__ __PRETTY_FUNCTION__))
;
119 Prefix.Dst = Inst.getOperand(0).getReg();
120 Prefix.Pg = Inst.getOperand(1).getReg();
121 break;
122 default:
123 break;
124 }
125
126 return Prefix;
127 }
128
129 PrefixInfo() : Active(false), Predicated(false) {}
130 bool isActive() const { return Active; }
131 bool isPredicated() const { return Predicated; }
132 unsigned getElementSize() const {
133 assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail
("Predicated", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 133, __extension__ __PRETTY_FUNCTION__))
;
134 return ElementSize;
135 }
136 unsigned getDstReg() const { return Dst; }
137 unsigned getPgReg() const {
138 assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail
("Predicated", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 138, __extension__ __PRETTY_FUNCTION__))
;
139 return Pg;
140 }
141
142 private:
143 bool Active;
144 bool Predicated;
145 unsigned ElementSize;
146 unsigned Dst;
147 unsigned Pg;
148 } NextPrefix;
149
150 AArch64TargetStreamer &getTargetStreamer() {
151 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
152 return static_cast<AArch64TargetStreamer &>(TS);
153 }
154
155 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
156
157 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
158 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
159 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
160 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
161 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
162 bool parseRegister(OperandVector &Operands);
163 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
164 bool parseNeonVectorList(OperandVector &Operands);
165 bool parseOptionalMulOperand(OperandVector &Operands);
166 bool parseKeywordOperand(OperandVector &Operands);
167 bool parseOperand(OperandVector &Operands, bool isCondCode,
168 bool invertCondCode);
169 bool parseImmExpr(int64_t &Out);
170 bool parseComma();
171 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
172 unsigned Last);
173
174 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
175 OperandVector &Operands);
176
177 bool parseDirectiveArch(SMLoc L);
178 bool parseDirectiveArchExtension(SMLoc L);
179 bool parseDirectiveCPU(SMLoc L);
180 bool parseDirectiveInst(SMLoc L);
181
182 bool parseDirectiveTLSDescCall(SMLoc L);
183
184 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
185 bool parseDirectiveLtorg(SMLoc L);
186
187 bool parseDirectiveReq(StringRef Name, SMLoc L);
188 bool parseDirectiveUnreq(SMLoc L);
189 bool parseDirectiveCFINegateRAState();
190 bool parseDirectiveCFIBKeyFrame();
191
192 bool parseDirectiveVariantPCS(SMLoc L);
193
194 bool parseDirectiveSEHAllocStack(SMLoc L);
195 bool parseDirectiveSEHPrologEnd(SMLoc L);
196 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
197 bool parseDirectiveSEHSaveFPLR(SMLoc L);
198 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
199 bool parseDirectiveSEHSaveReg(SMLoc L);
200 bool parseDirectiveSEHSaveRegX(SMLoc L);
201 bool parseDirectiveSEHSaveRegP(SMLoc L);
202 bool parseDirectiveSEHSaveRegPX(SMLoc L);
203 bool parseDirectiveSEHSaveLRPair(SMLoc L);
204 bool parseDirectiveSEHSaveFReg(SMLoc L);
205 bool parseDirectiveSEHSaveFRegX(SMLoc L);
206 bool parseDirectiveSEHSaveFRegP(SMLoc L);
207 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
208 bool parseDirectiveSEHSetFP(SMLoc L);
209 bool parseDirectiveSEHAddFP(SMLoc L);
210 bool parseDirectiveSEHNop(SMLoc L);
211 bool parseDirectiveSEHSaveNext(SMLoc L);
212 bool parseDirectiveSEHEpilogStart(SMLoc L);
213 bool parseDirectiveSEHEpilogEnd(SMLoc L);
214 bool parseDirectiveSEHTrapFrame(SMLoc L);
215 bool parseDirectiveSEHMachineFrame(SMLoc L);
216 bool parseDirectiveSEHContext(SMLoc L);
217 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
218
219 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
220 SmallVectorImpl<SMLoc> &Loc);
221 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
222 OperandVector &Operands, MCStreamer &Out,
223 uint64_t &ErrorInfo,
224 bool MatchingInlineAsm) override;
225/// @name Auto-generated Match Functions
226/// {
227
228#define GET_ASSEMBLER_HEADER
229#include "AArch64GenAsmMatcher.inc"
230
231 /// }
232
233 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
234 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
235 RegKind MatchKind);
236 OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
237 OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
238 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
239 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
240 OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
241 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
242 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
243 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
244 template <bool IsSVEPrefetch = false>
245 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
246 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
247 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
248 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
249 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
250 template<bool AddFPZeroAsLiteral>
251 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
252 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
253 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
254 bool tryParseNeonVectorRegister(OperandVector &Operands);
255 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
256 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
257 template <bool ParseShiftExtend,
258 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
259 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
260 template <bool ParseShiftExtend, bool ParseSuffix>
261 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
262 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
263 template <RegKind VectorKind>
264 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
265 bool ExpectMatch = false);
266 OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
267 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
268 OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
269
270public:
271 enum AArch64MatchResultTy {
272 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
273#define GET_OPERAND_DIAGNOSTIC_TYPES
274#include "AArch64GenAsmMatcher.inc"
275 };
276 bool IsILP32;
277
278 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
279 const MCInstrInfo &MII, const MCTargetOptions &Options)
280 : MCTargetAsmParser(Options, STI, MII) {
281 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
282 MCAsmParserExtension::Initialize(Parser);
283 MCStreamer &S = getParser().getStreamer();
284 if (S.getTargetStreamer() == nullptr)
285 new AArch64TargetStreamer(S);
286
287 // Alias .hword/.word/.[dx]word to the target-independent
288 // .2byte/.4byte/.8byte directives as they have the same form and
289 // semantics:
290 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
291 Parser.addAliasForDirective(".hword", ".2byte");
292 Parser.addAliasForDirective(".word", ".4byte");
293 Parser.addAliasForDirective(".dword", ".8byte");
294 Parser.addAliasForDirective(".xword", ".8byte");
295
296 // Initialize the set of available features.
297 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
298 }
299
300 bool regsEqual(const MCParsedAsmOperand &Op1,
301 const MCParsedAsmOperand &Op2) const override;
302 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
303 SMLoc NameLoc, OperandVector &Operands) override;
304 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
305 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
306 SMLoc &EndLoc) override;
307 bool ParseDirective(AsmToken DirectiveID) override;
308 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
309 unsigned Kind) override;
310
311 static bool classifySymbolRef(const MCExpr *Expr,
312 AArch64MCExpr::VariantKind &ELFRefKind,
313 MCSymbolRefExpr::VariantKind &DarwinRefKind,
314 int64_t &Addend);
315};
316
317/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
318/// instruction.
319class AArch64Operand : public MCParsedAsmOperand {
320private:
321 enum KindTy {
322 k_Immediate,
323 k_ShiftedImm,
324 k_CondCode,
325 k_Register,
326 k_MatrixRegister,
327 k_MatrixTileList,
328 k_SVCR,
329 k_VectorList,
330 k_VectorIndex,
331 k_Token,
332 k_SysReg,
333 k_SysCR,
334 k_Prefetch,
335 k_ShiftExtend,
336 k_FPImm,
337 k_Barrier,
338 k_PSBHint,
339 k_BTIHint,
340 } Kind;
341
342 SMLoc StartLoc, EndLoc;
343
344 struct TokOp {
345 const char *Data;
346 unsigned Length;
347 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
348 };
349
350 // Separate shift/extend operand.
351 struct ShiftExtendOp {
352 AArch64_AM::ShiftExtendType Type;
353 unsigned Amount;
354 bool HasExplicitAmount;
355 };
356
357 struct RegOp {
358 unsigned RegNum;
359 RegKind Kind;
360 int ElementWidth;
361
362 // The register may be allowed as a different register class,
363 // e.g. for GPR64as32 or GPR32as64.
364 RegConstraintEqualityTy EqualityTy;
365
366 // In some cases the shift/extend needs to be explicitly parsed together
367 // with the register, rather than as a separate operand. This is needed
368 // for addressing modes where the instruction as a whole dictates the
369 // scaling/extend, rather than specific bits in the instruction.
370 // By parsing them as a single operand, we avoid the need to pass an
371 // extra operand in all CodeGen patterns (because all operands need to
372 // have an associated value), and we avoid the need to update TableGen to
373 // accept operands that have no associated bits in the instruction.
374 //
375 // An added benefit of parsing them together is that the assembler
376 // can give a sensible diagnostic if the scaling is not correct.
377 //
378 // The default is 'lsl #0' (HasExplicitAmount = false) if no
379 // ShiftExtend is specified.
380 ShiftExtendOp ShiftExtend;
381 };
382
383 struct MatrixRegOp {
384 unsigned RegNum;
385 unsigned ElementWidth;
386 MatrixKind Kind;
387 };
388
389 struct MatrixTileListOp {
390 unsigned RegMask = 0;
391 };
392
393 struct VectorListOp {
394 unsigned RegNum;
395 unsigned Count;
396 unsigned NumElements;
397 unsigned ElementWidth;
398 RegKind RegisterKind;
399 };
400
401 struct VectorIndexOp {
402 int Val;
403 };
404
405 struct ImmOp {
406 const MCExpr *Val;
407 };
408
409 struct ShiftedImmOp {
410 const MCExpr *Val;
411 unsigned ShiftAmount;
412 };
413
414 struct CondCodeOp {
415 AArch64CC::CondCode Code;
416 };
417
418 struct FPImmOp {
419 uint64_t Val; // APFloat value bitcasted to uint64_t.
420 bool IsExact; // describes whether parsed value was exact.
421 };
422
423 struct BarrierOp {
424 const char *Data;
425 unsigned Length;
426 unsigned Val; // Not the enum since not all values have names.
427 bool HasnXSModifier;
428 };
429
430 struct SysRegOp {
431 const char *Data;
432 unsigned Length;
433 uint32_t MRSReg;
434 uint32_t MSRReg;
435 uint32_t PStateField;
436 };
437
438 struct SysCRImmOp {
439 unsigned Val;
440 };
441
442 struct PrefetchOp {
443 const char *Data;
444 unsigned Length;
445 unsigned Val;
446 };
447
448 struct PSBHintOp {
449 const char *Data;
450 unsigned Length;
451 unsigned Val;
452 };
453
454 struct BTIHintOp {
455 const char *Data;
456 unsigned Length;
457 unsigned Val;
458 };
459
460 struct SVCROp {
461 const char *Data;
462 unsigned Length;
463 unsigned PStateField;
464 };
465
466 union {
467 struct TokOp Tok;
468 struct RegOp Reg;
469 struct MatrixRegOp MatrixReg;
470 struct MatrixTileListOp MatrixTileList;
471 struct VectorListOp VectorList;
472 struct VectorIndexOp VectorIndex;
473 struct ImmOp Imm;
474 struct ShiftedImmOp ShiftedImm;
475 struct CondCodeOp CondCode;
476 struct FPImmOp FPImm;
477 struct BarrierOp Barrier;
478 struct SysRegOp SysReg;
479 struct SysCRImmOp SysCRImm;
480 struct PrefetchOp Prefetch;
481 struct PSBHintOp PSBHint;
482 struct BTIHintOp BTIHint;
483 struct ShiftExtendOp ShiftExtend;
484 struct SVCROp SVCR;
485 };
486
487 // Keep the MCContext around as the MCExprs may need manipulated during
488 // the add<>Operands() calls.
489 MCContext &Ctx;
490
491public:
492 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
493
494 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
495 Kind = o.Kind;
496 StartLoc = o.StartLoc;
497 EndLoc = o.EndLoc;
498 switch (Kind) {
499 case k_Token:
500 Tok = o.Tok;
501 break;
502 case k_Immediate:
503 Imm = o.Imm;
504 break;
505 case k_ShiftedImm:
506 ShiftedImm = o.ShiftedImm;
507 break;
508 case k_CondCode:
509 CondCode = o.CondCode;
510 break;
511 case k_FPImm:
512 FPImm = o.FPImm;
513 break;
514 case k_Barrier:
515 Barrier = o.Barrier;
516 break;
517 case k_Register:
518 Reg = o.Reg;
519 break;
520 case k_MatrixRegister:
521 MatrixReg = o.MatrixReg;
522 break;
523 case k_MatrixTileList:
524 MatrixTileList = o.MatrixTileList;
525 break;
526 case k_VectorList:
527 VectorList = o.VectorList;
528 break;
529 case k_VectorIndex:
530 VectorIndex = o.VectorIndex;
531 break;
532 case k_SysReg:
533 SysReg = o.SysReg;
534 break;
535 case k_SysCR:
536 SysCRImm = o.SysCRImm;
537 break;
538 case k_Prefetch:
539 Prefetch = o.Prefetch;
540 break;
541 case k_PSBHint:
542 PSBHint = o.PSBHint;
543 break;
544 case k_BTIHint:
545 BTIHint = o.BTIHint;
546 break;
547 case k_ShiftExtend:
548 ShiftExtend = o.ShiftExtend;
549 break;
550 case k_SVCR:
551 SVCR = o.SVCR;
552 break;
553 }
554 }
555
556 /// getStartLoc - Get the location of the first token of this operand.
557 SMLoc getStartLoc() const override { return StartLoc; }
558 /// getEndLoc - Get the location of the last token of this operand.
559 SMLoc getEndLoc() const override { return EndLoc; }
560
561 StringRef getToken() const {
562 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 562, __extension__ __PRETTY_FUNCTION__))
;
563 return StringRef(Tok.Data, Tok.Length);
564 }
565
566 bool isTokenSuffix() const {
567 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 567, __extension__ __PRETTY_FUNCTION__))
;
568 return Tok.IsSuffix;
569 }
570
571 const MCExpr *getImm() const {
572 assert(Kind == k_Immediate && "Invalid access!")(static_cast <bool> (Kind == k_Immediate && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 572, __extension__ __PRETTY_FUNCTION__))
;
573 return Imm.Val;
574 }
575
576 const MCExpr *getShiftedImmVal() const {
577 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 577, __extension__ __PRETTY_FUNCTION__))
;
578 return ShiftedImm.Val;
579 }
580
581 unsigned getShiftedImmShift() const {
582 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 582, __extension__ __PRETTY_FUNCTION__))
;
583 return ShiftedImm.ShiftAmount;
584 }
585
586 AArch64CC::CondCode getCondCode() const {
587 assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 587, __extension__ __PRETTY_FUNCTION__))
;
588 return CondCode.Code;
589 }
590
591 APFloat getFPImm() const {
592 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 592, __extension__ __PRETTY_FUNCTION__))
;
593 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
594 }
595
596 bool getFPImmIsExact() const {
597 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 597, __extension__ __PRETTY_FUNCTION__))
;
598 return FPImm.IsExact;
599 }
600
601 unsigned getBarrier() const {
602 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 602, __extension__ __PRETTY_FUNCTION__))
;
603 return Barrier.Val;
604 }
605
606 StringRef getBarrierName() const {
607 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 607, __extension__ __PRETTY_FUNCTION__))
;
608 return StringRef(Barrier.Data, Barrier.Length);
609 }
610
611 bool getBarriernXSModifier() const {
612 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 612, __extension__ __PRETTY_FUNCTION__))
;
613 return Barrier.HasnXSModifier;
614 }
615
616 unsigned getReg() const override {
617 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 617, __extension__ __PRETTY_FUNCTION__))
;
618 return Reg.RegNum;
619 }
620
621 unsigned getMatrixReg() const {
622 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 622, __extension__ __PRETTY_FUNCTION__))
;
623 return MatrixReg.RegNum;
624 }
625
626 unsigned getMatrixElementWidth() const {
627 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 627, __extension__ __PRETTY_FUNCTION__))
;
628 return MatrixReg.ElementWidth;
629 }
630
631 MatrixKind getMatrixKind() const {
632 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 632, __extension__ __PRETTY_FUNCTION__))
;
633 return MatrixReg.Kind;
634 }
635
636 unsigned getMatrixTileListRegMask() const {
637 assert(isMatrixTileList() && "Invalid access!")(static_cast <bool> (isMatrixTileList() && "Invalid access!"
) ? void (0) : __assert_fail ("isMatrixTileList() && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 637, __extension__ __PRETTY_FUNCTION__))
;
638 return MatrixTileList.RegMask;
639 }
640
641 RegConstraintEqualityTy getRegEqualityTy() const {
642 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 642, __extension__ __PRETTY_FUNCTION__))
;
643 return Reg.EqualityTy;
644 }
645
646 unsigned getVectorListStart() const {
647 assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 647, __extension__ __PRETTY_FUNCTION__))
;
648 return VectorList.RegNum;
649 }
650
651 unsigned getVectorListCount() const {
652 assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 652, __extension__ __PRETTY_FUNCTION__))
;
653 return VectorList.Count;
654 }
655
656 int getVectorIndex() const {
657 assert(Kind == k_VectorIndex && "Invalid access!")(static_cast <bool> (Kind == k_VectorIndex && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 657, __extension__ __PRETTY_FUNCTION__))
;
658 return VectorIndex.Val;
659 }
660
661 StringRef getSysReg() const {
662 assert(Kind == k_SysReg && "Invalid access!")(static_cast <bool> (Kind == k_SysReg && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 662, __extension__ __PRETTY_FUNCTION__))
;
663 return StringRef(SysReg.Data, SysReg.Length);
664 }
665
666 unsigned getSysCR() const {
667 assert(Kind == k_SysCR && "Invalid access!")(static_cast <bool> (Kind == k_SysCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 667, __extension__ __PRETTY_FUNCTION__))
;
668 return SysCRImm.Val;
669 }
670
671 unsigned getPrefetch() const {
672 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 672, __extension__ __PRETTY_FUNCTION__))
;
673 return Prefetch.Val;
674 }
675
676 unsigned getPSBHint() const {
677 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 677, __extension__ __PRETTY_FUNCTION__))
;
678 return PSBHint.Val;
679 }
680
681 StringRef getPSBHintName() const {
682 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 682, __extension__ __PRETTY_FUNCTION__))
;
683 return StringRef(PSBHint.Data, PSBHint.Length);
684 }
685
686 unsigned getBTIHint() const {
687 assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 687, __extension__ __PRETTY_FUNCTION__))
;
688 return BTIHint.Val;
689 }
690
691 StringRef getBTIHintName() const {
692 assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 692, __extension__ __PRETTY_FUNCTION__))
;
693 return StringRef(BTIHint.Data, BTIHint.Length);
694 }
695
696 StringRef getSVCR() const {
697 assert(Kind == k_SVCR && "Invalid access!")(static_cast <bool> (Kind == k_SVCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SVCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 697, __extension__ __PRETTY_FUNCTION__))
;
698 return StringRef(SVCR.Data, SVCR.Length);
699 }
700
701 StringRef getPrefetchName() const {
702 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 702, __extension__ __PRETTY_FUNCTION__))
;
703 return StringRef(Prefetch.Data, Prefetch.Length);
704 }
705
706 AArch64_AM::ShiftExtendType getShiftExtendType() const {
707 if (Kind == k_ShiftExtend)
708 return ShiftExtend.Type;
709 if (Kind == k_Register)
710 return Reg.ShiftExtend.Type;
711 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 711)
;
712 }
713
714 unsigned getShiftExtendAmount() const {
715 if (Kind == k_ShiftExtend)
716 return ShiftExtend.Amount;
717 if (Kind == k_Register)
718 return Reg.ShiftExtend.Amount;
719 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 719)
;
720 }
721
722 bool hasShiftExtendAmount() const {
723 if (Kind == k_ShiftExtend)
724 return ShiftExtend.HasExplicitAmount;
725 if (Kind == k_Register)
726 return Reg.ShiftExtend.HasExplicitAmount;
727 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 727)
;
728 }
729
730 bool isImm() const override { return Kind == k_Immediate; }
731 bool isMem() const override { return false; }
732
733 bool isUImm6() const {
734 if (!isImm())
735 return false;
736 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 if (!MCE)
738 return false;
739 int64_t Val = MCE->getValue();
740 return (Val >= 0 && Val < 64);
741 }
742
743 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
744
745 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
746 return isImmScaled<Bits, Scale>(true);
747 }
748
749 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
750 return isImmScaled<Bits, Scale>(false);
751 }
752
753 template <int Bits, int Scale>
754 DiagnosticPredicate isImmScaled(bool Signed) const {
755 if (!isImm())
756 return DiagnosticPredicateTy::NoMatch;
757
758 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
759 if (!MCE)
760 return DiagnosticPredicateTy::NoMatch;
761
762 int64_t MinVal, MaxVal;
763 if (Signed) {
764 int64_t Shift = Bits - 1;
765 MinVal = (int64_t(1) << Shift) * -Scale;
766 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
767 } else {
768 MinVal = 0;
769 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
770 }
771
772 int64_t Val = MCE->getValue();
773 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
774 return DiagnosticPredicateTy::Match;
775
776 return DiagnosticPredicateTy::NearMatch;
777 }
778
779 DiagnosticPredicate isSVEPattern() const {
780 if (!isImm())
781 return DiagnosticPredicateTy::NoMatch;
782 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
783 if (!MCE)
784 return DiagnosticPredicateTy::NoMatch;
785 int64_t Val = MCE->getValue();
786 if (Val >= 0 && Val < 32)
787 return DiagnosticPredicateTy::Match;
788 return DiagnosticPredicateTy::NearMatch;
789 }
790
791 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
792 AArch64MCExpr::VariantKind ELFRefKind;
793 MCSymbolRefExpr::VariantKind DarwinRefKind;
794 int64_t Addend;
795 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
796 Addend)) {
797 // If we don't understand the expression, assume the best and
798 // let the fixup and relocation code deal with it.
799 return true;
800 }
801
802 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
803 ELFRefKind == AArch64MCExpr::VK_LO12 ||
804 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
805 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
806 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
807 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
808 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
809 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
810 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
811 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
812 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
813 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
814 // Note that we don't range-check the addend. It's adjusted modulo page
815 // size when converted, so there is no "out of range" condition when using
816 // @pageoff.
817 return true;
818 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
819 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
820 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
821 return Addend == 0;
822 }
823
824 return false;
825 }
826
827 template <int Scale> bool isUImm12Offset() const {
828 if (!isImm())
829 return false;
830
831 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
832 if (!MCE)
833 return isSymbolicUImm12Offset(getImm());
834
835 int64_t Val = MCE->getValue();
836 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
837 }
838
839 template <int N, int M>
840 bool isImmInRange() const {
841 if (!isImm())
842 return false;
843 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
844 if (!MCE)
845 return false;
846 int64_t Val = MCE->getValue();
847 return (Val >= N && Val <= M);
848 }
849
850 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
851 // a logical immediate can always be represented when inverted.
852 template <typename T>
853 bool isLogicalImm() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
857 if (!MCE)
858 return false;
859
860 int64_t Val = MCE->getValue();
861 // Avoid left shift by 64 directly.
862 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
863 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
864 if ((Val & Upper) && (Val & Upper) != Upper)
865 return false;
866
867 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
868 }
869
870 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
871
872 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
873 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
874 /// immediate that can be shifted by 'Shift'.
875 template <unsigned Width>
876 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
877 if (isShiftedImm() && Width == getShiftedImmShift())
878 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
879 return std::make_pair(CE->getValue(), Width);
880
881 if (isImm())
882 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
883 int64_t Val = CE->getValue();
884 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
885 return std::make_pair(Val >> Width, Width);
886 else
887 return std::make_pair(Val, 0u);
888 }
889
890 return {};
891 }
892
893 bool isAddSubImm() const {
894 if (!isShiftedImm() && !isImm())
895 return false;
896
897 const MCExpr *Expr;
898
899 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
900 if (isShiftedImm()) {
901 unsigned Shift = ShiftedImm.ShiftAmount;
902 Expr = ShiftedImm.Val;
903 if (Shift != 0 && Shift != 12)
904 return false;
905 } else {
906 Expr = getImm();
907 }
908
909 AArch64MCExpr::VariantKind ELFRefKind;
910 MCSymbolRefExpr::VariantKind DarwinRefKind;
911 int64_t Addend;
912 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
913 DarwinRefKind, Addend)) {
914 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
915 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
916 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
917 || ELFRefKind == AArch64MCExpr::VK_LO12
918 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
919 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
920 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
921 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
922 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
923 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
924 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
925 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
926 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
927 }
928
929 // If it's a constant, it should be a real immediate in range.
930 if (auto ShiftedVal = getShiftedVal<12>())
931 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
932
933 // If it's an expression, we hope for the best and let the fixup/relocation
934 // code deal with it.
935 return true;
936 }
937
938 bool isAddSubImmNeg() const {
939 if (!isShiftedImm() && !isImm())
940 return false;
941
942 // Otherwise it should be a real negative immediate in range.
943 if (auto ShiftedVal = getShiftedVal<12>())
944 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
945
946 return false;
947 }
948
949 // Signed value in the range -128 to +127. For element widths of
950 // 16 bits or higher it may also be a signed multiple of 256 in the
951 // range -32768 to +32512.
952 // For element-width of 8 bits a range of -128 to 255 is accepted,
953 // since a copy of a byte can be either signed/unsigned.
954 template <typename T>
955 DiagnosticPredicate isSVECpyImm() const {
956 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
957 return DiagnosticPredicateTy::NoMatch;
958
959 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
960 std::is_same<int8_t, T>::value;
961 if (auto ShiftedImm = getShiftedVal<8>())
962 if (!(IsByte && ShiftedImm->second) &&
963 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
964 << ShiftedImm->second))
965 return DiagnosticPredicateTy::Match;
966
967 return DiagnosticPredicateTy::NearMatch;
968 }
969
970 // Unsigned value in the range 0 to 255. For element widths of
971 // 16 bits or higher it may also be a signed multiple of 256 in the
972 // range 0 to 65280.
973 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
974 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
975 return DiagnosticPredicateTy::NoMatch;
976
977 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
978 std::is_same<int8_t, T>::value;
979 if (auto ShiftedImm = getShiftedVal<8>())
980 if (!(IsByte && ShiftedImm->second) &&
981 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
982 << ShiftedImm->second))
983 return DiagnosticPredicateTy::Match;
984
985 return DiagnosticPredicateTy::NearMatch;
986 }
987
988 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
989 if (isLogicalImm<T>() && !isSVECpyImm<T>())
990 return DiagnosticPredicateTy::Match;
991 return DiagnosticPredicateTy::NoMatch;
992 }
993
994 bool isCondCode() const { return Kind == k_CondCode; }
995
996 bool isSIMDImmType10() const {
997 if (!isImm())
998 return false;
999 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1000 if (!MCE)
1001 return false;
1002 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1003 }
1004
1005 template<int N>
1006 bool isBranchTarget() const {
1007 if (!isImm())
1008 return false;
1009 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1010 if (!MCE)
1011 return true;
1012 int64_t Val = MCE->getValue();
1013 if (Val & 0x3)
1014 return false;
1015 assert(N > 0 && "Branch target immediate cannot be 0 bits!")(static_cast <bool> (N > 0 && "Branch target immediate cannot be 0 bits!"
) ? void (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1015, __extension__ __PRETTY_FUNCTION__))
;
1016 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1017 }
1018
1019 bool
1020 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1021 if (!isImm())
1022 return false;
1023
1024 AArch64MCExpr::VariantKind ELFRefKind;
1025 MCSymbolRefExpr::VariantKind DarwinRefKind;
1026 int64_t Addend;
1027 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1028 DarwinRefKind, Addend)) {
1029 return false;
1030 }
1031 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1032 return false;
1033
1034 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
1035 if (ELFRefKind == AllowedModifiers[i])
1036 return true;
1037 }
1038
1039 return false;
1040 }
1041
1042 bool isMovWSymbolG3() const {
1043 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1044 }
1045
1046 bool isMovWSymbolG2() const {
1047 return isMovWSymbol(
1048 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1049 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1050 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1051 AArch64MCExpr::VK_DTPREL_G2});
1052 }
1053
1054 bool isMovWSymbolG1() const {
1055 return isMovWSymbol(
1056 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1057 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1058 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1059 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1060 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1061 }
1062
1063 bool isMovWSymbolG0() const {
1064 return isMovWSymbol(
1065 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1066 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1067 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1068 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1069 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1070 }
1071
1072 template<int RegWidth, int Shift>
1073 bool isMOVZMovAlias() const {
1074 if (!isImm()) return false;
1075
1076 const MCExpr *E = getImm();
1077 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1078 uint64_t Value = CE->getValue();
1079
1080 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1081 }
1082 // Only supports the case of Shift being 0 if an expression is used as an
1083 // operand
1084 return !Shift && E;
1085 }
1086
1087 template<int RegWidth, int Shift>
1088 bool isMOVNMovAlias() const {
1089 if (!isImm()) return false;
1090
1091 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1092 if (!CE) return false;
1093 uint64_t Value = CE->getValue();
1094
1095 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1096 }
1097
1098 bool isFPImm() const {
1099 return Kind == k_FPImm &&
1100 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1101 }
1102
1103 bool isBarrier() const {
1104 return Kind == k_Barrier && !getBarriernXSModifier();
1105 }
1106 bool isBarriernXS() const {
1107 return Kind == k_Barrier && getBarriernXSModifier();
1108 }
1109 bool isSysReg() const { return Kind == k_SysReg; }
1110
1111 bool isMRSSystemRegister() const {
1112 if (!isSysReg()) return false;
1113
1114 return SysReg.MRSReg != -1U;
1115 }
1116
1117 bool isMSRSystemRegister() const {
1118 if (!isSysReg()) return false;
1119 return SysReg.MSRReg != -1U;
1120 }
1121
1122 bool isSystemPStateFieldWithImm0_1() const {
1123 if (!isSysReg()) return false;
1124 return (SysReg.PStateField == AArch64PState::PAN ||
1125 SysReg.PStateField == AArch64PState::DIT ||
1126 SysReg.PStateField == AArch64PState::UAO ||
1127 SysReg.PStateField == AArch64PState::SSBS);
1128 }
1129
1130 bool isSystemPStateFieldWithImm0_15() const {
1131 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1132 return SysReg.PStateField != -1U;
1133 }
1134
1135 bool isSVCR() const {
1136 if (Kind != k_SVCR)
1137 return false;
1138 return SVCR.PStateField != -1U;
1139 }
1140
1141 bool isReg() const override {
1142 return Kind == k_Register;
1143 }
1144
1145 bool isScalarReg() const {
1146 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1147 }
1148
1149 bool isNeonVectorReg() const {
1150 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1151 }
1152
1153 bool isNeonVectorRegLo() const {
1154 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1155 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1156 Reg.RegNum) ||
1157 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1158 Reg.RegNum));
1159 }
1160
1161 bool isMatrix() const { return Kind == k_MatrixRegister; }
1162 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1163
1164 template <unsigned Class> bool isSVEVectorReg() const {
1165 RegKind RK;
1166 switch (Class) {
1167 case AArch64::ZPRRegClassID:
1168 case AArch64::ZPR_3bRegClassID:
1169 case AArch64::ZPR_4bRegClassID:
1170 RK = RegKind::SVEDataVector;
1171 break;
1172 case AArch64::PPRRegClassID:
1173 case AArch64::PPR_3bRegClassID:
1174 RK = RegKind::SVEPredicateVector;
1175 break;
1176 default:
1177 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1177)
;
1178 }
1179
1180 return (Kind == k_Register && Reg.Kind == RK) &&
1181 AArch64MCRegisterClasses[Class].contains(getReg());
1182 }
1183
1184 template <unsigned Class> bool isFPRasZPR() const {
1185 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1186 AArch64MCRegisterClasses[Class].contains(getReg());
1187 }
1188
1189 template <int ElementWidth, unsigned Class>
1190 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1191 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1192 return DiagnosticPredicateTy::NoMatch;
1193
1194 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1195 return DiagnosticPredicateTy::Match;
1196
1197 return DiagnosticPredicateTy::NearMatch;
1198 }
1199
1200 template <int ElementWidth, unsigned Class>
1201 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1202 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1203 return DiagnosticPredicateTy::NoMatch;
1204
1205 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1206 return DiagnosticPredicateTy::Match;
1207
1208 return DiagnosticPredicateTy::NearMatch;
1209 }
1210
1211 template <int ElementWidth, unsigned Class,
1212 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1213 bool ShiftWidthAlwaysSame>
1214 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1215 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1216 if (!VectorMatch.isMatch())
1217 return DiagnosticPredicateTy::NoMatch;
1218
1219 // Give a more specific diagnostic when the user has explicitly typed in
1220 // a shift-amount that does not match what is expected, but for which
1221 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1222 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1223 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1224 ShiftExtendTy == AArch64_AM::SXTW) &&
1225 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1226 return DiagnosticPredicateTy::NoMatch;
1227
1228 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1229 return DiagnosticPredicateTy::Match;
1230
1231 return DiagnosticPredicateTy::NearMatch;
1232 }
1233
1234 bool isGPR32as64() const {
1235 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1236 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1237 }
1238
1239 bool isGPR64as32() const {
1240 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1241 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1242 }
1243
1244 bool isGPR64x8() const {
1245 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1246 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1247 Reg.RegNum);
1248 }
1249
1250 bool isWSeqPair() const {
1251 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1252 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1253 Reg.RegNum);
1254 }
1255
1256 bool isXSeqPair() const {
1257 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1258 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1259 Reg.RegNum);
1260 }
1261
1262 template<int64_t Angle, int64_t Remainder>
1263 DiagnosticPredicate isComplexRotation() const {
1264 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1265
1266 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1267 if (!CE) return DiagnosticPredicateTy::NoMatch;
1268 uint64_t Value = CE->getValue();
1269
1270 if (Value % Angle == Remainder && Value <= 270)
1271 return DiagnosticPredicateTy::Match;
1272 return DiagnosticPredicateTy::NearMatch;
1273 }
1274
1275 template <unsigned RegClassID> bool isGPR64() const {
1276 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1277 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1278 }
1279
1280 template <unsigned RegClassID, int ExtWidth>
1281 DiagnosticPredicate isGPR64WithShiftExtend() const {
1282 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1283 return DiagnosticPredicateTy::NoMatch;
1284
1285 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1286 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1287 return DiagnosticPredicateTy::Match;
1288 return DiagnosticPredicateTy::NearMatch;
1289 }
1290
1291 /// Is this a vector list with the type implicit (presumably attached to the
1292 /// instruction itself)?
1293 template <RegKind VectorKind, unsigned NumRegs>
1294 bool isImplicitlyTypedVectorList() const {
1295 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1296 VectorList.NumElements == 0 &&
1297 VectorList.RegisterKind == VectorKind;
1298 }
1299
1300 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1301 unsigned ElementWidth>
1302 bool isTypedVectorList() const {
1303 if (Kind != k_VectorList)
1304 return false;
1305 if (VectorList.Count != NumRegs)
1306 return false;
1307 if (VectorList.RegisterKind != VectorKind)
1308 return false;
1309 if (VectorList.ElementWidth != ElementWidth)
1310 return false;
1311 return VectorList.NumElements == NumElements;
1312 }
1313
1314 template <int Min, int Max>
1315 DiagnosticPredicate isVectorIndex() const {
1316 if (Kind != k_VectorIndex)
1317 return DiagnosticPredicateTy::NoMatch;
1318 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1319 return DiagnosticPredicateTy::Match;
1320 return DiagnosticPredicateTy::NearMatch;
1321 }
1322
1323 bool isToken() const override { return Kind == k_Token; }
1324
1325 bool isTokenEqual(StringRef Str) const {
1326 return Kind == k_Token && getToken() == Str;
4
Assuming field 'Kind' is not equal to k_Token
5
Returning zero, which participates in a condition later
1327 }
1328 bool isSysCR() const { return Kind == k_SysCR; }
1329 bool isPrefetch() const { return Kind == k_Prefetch; }
1330 bool isPSBHint() const { return Kind == k_PSBHint; }
1331 bool isBTIHint() const { return Kind == k_BTIHint; }
1332 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1333 bool isShifter() const {
1334 if (!isShiftExtend())
1335 return false;
1336
1337 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1338 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1339 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1340 ST == AArch64_AM::MSL);
1341 }
1342
1343 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1344 if (Kind != k_FPImm)
1345 return DiagnosticPredicateTy::NoMatch;
1346
1347 if (getFPImmIsExact()) {
1348 // Lookup the immediate from table of supported immediates.
1349 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1350 assert(Desc && "Unknown enum value")(static_cast <bool> (Desc && "Unknown enum value"
) ? void (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1350, __extension__ __PRETTY_FUNCTION__))
;
1351
1352 // Calculate its FP value.
1353 APFloat RealVal(APFloat::IEEEdouble());
1354 auto StatusOrErr =
1355 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1356 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1357 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1357)
;
1358
1359 if (getFPImm().bitwiseIsEqual(RealVal))
1360 return DiagnosticPredicateTy::Match;
1361 }
1362
1363 return DiagnosticPredicateTy::NearMatch;
1364 }
1365
1366 template <unsigned ImmA, unsigned ImmB>
1367 DiagnosticPredicate isExactFPImm() const {
1368 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1369 if ((Res = isExactFPImm<ImmA>()))
1370 return DiagnosticPredicateTy::Match;
1371 if ((Res = isExactFPImm<ImmB>()))
1372 return DiagnosticPredicateTy::Match;
1373 return Res;
1374 }
1375
1376 bool isExtend() const {
1377 if (!isShiftExtend())
1378 return false;
1379
1380 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1381 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1382 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1383 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1384 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1385 ET == AArch64_AM::LSL) &&
1386 getShiftExtendAmount() <= 4;
1387 }
1388
1389 bool isExtend64() const {
1390 if (!isExtend())
1391 return false;
1392 // Make sure the extend expects a 32-bit source register.
1393 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1394 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1395 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1396 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1397 }
1398
1399 bool isExtendLSL64() const {
1400 if (!isExtend())
1401 return false;
1402 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1403 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1404 ET == AArch64_AM::LSL) &&
1405 getShiftExtendAmount() <= 4;
1406 }
1407
1408 template<int Width> bool isMemXExtend() const {
1409 if (!isExtend())
1410 return false;
1411 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1412 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1413 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1414 getShiftExtendAmount() == 0);
1415 }
1416
1417 template<int Width> bool isMemWExtend() const {
1418 if (!isExtend())
1419 return false;
1420 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1421 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1422 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1423 getShiftExtendAmount() == 0);
1424 }
1425
1426 template <unsigned width>
1427 bool isArithmeticShifter() const {
1428 if (!isShifter())
1429 return false;
1430
1431 // An arithmetic shifter is LSL, LSR, or ASR.
1432 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1433 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1434 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1435 }
1436
1437 template <unsigned width>
1438 bool isLogicalShifter() const {
1439 if (!isShifter())
1440 return false;
1441
1442 // A logical shifter is LSL, LSR, ASR or ROR.
1443 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1444 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1445 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1446 getShiftExtendAmount() < width;
1447 }
1448
1449 bool isMovImm32Shifter() const {
1450 if (!isShifter())
1451 return false;
1452
1453 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1454 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1455 if (ST != AArch64_AM::LSL)
1456 return false;
1457 uint64_t Val = getShiftExtendAmount();
1458 return (Val == 0 || Val == 16);
1459 }
1460
  // Returns true if this operand is a shifter usable with a 64-bit MOV
  // immediate: an LSL whose amount is 0, 16, 32, or 48.
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    // (Note: the 32-bit variant above only allows 0 or 16.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1472
1473 bool isLogicalVecShifter() const {
1474 if (!isShifter())
1475 return false;
1476
1477 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1478 unsigned Shift = getShiftExtendAmount();
1479 return getShiftExtendType() == AArch64_AM::LSL &&
1480 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1481 }
1482
1483 bool isLogicalVecHalfWordShifter() const {
1484 if (!isLogicalVecShifter())
1485 return false;
1486
1487 // A logical vector shifter is a left shift by 0 or 8.
1488 unsigned Shift = getShiftExtendAmount();
1489 return getShiftExtendType() == AArch64_AM::LSL &&
1490 (Shift == 0 || Shift == 8);
1491 }
1492
  // Returns true if this operand is a shifter usable with the MOVI/MVNI
  // "shifting ones" forms: an MSL of 8 or 16.
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1502
1503 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1504 // to LDUR/STUR when the offset is not legal for the former but is for
1505 // the latter. As such, in addition to checking for being a legal unscaled
1506 // address, also check that it is not a legal scaled address. This avoids
1507 // ambiguity in the matcher.
1508 template<int Width>
1509 bool isSImm9OffsetFB() const {
1510 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1511 }
1512
1513 bool isAdrpLabel() const {
1514 // Validation was handled during parsing, so we just sanity check that
1515 // something didn't go haywire.
1516 if (!isImm())
1517 return false;
1518
1519 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1520 int64_t Val = CE->getValue();
1521 int64_t Min = - (4096 * (1LL << (21 - 1)));
1522 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1523 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1524 }
1525
1526 return true;
1527 }
1528
1529 bool isAdrLabel() const {
1530 // Validation was handled during parsing, so we just sanity check that
1531 // something didn't go haywire.
1532 if (!isImm())
1533 return false;
1534
1535 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1536 int64_t Val = CE->getValue();
1537 int64_t Min = - (1LL << (21 - 1));
1538 int64_t Max = ((1LL << (21 - 1)) - 1);
1539 return Val >= Min && Val <= Max;
1540 }
1541
1542 return true;
1543 }
1544
1545 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1546 DiagnosticPredicate isMatrixRegOperand() const {
1547 if (!isMatrix())
1548 return DiagnosticPredicateTy::NoMatch;
1549 if (getMatrixKind() != Kind ||
1550 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1551 EltSize != getMatrixElementWidth())
1552 return DiagnosticPredicateTy::NearMatch;
1553 return DiagnosticPredicateTy::Match;
1554 }
1555
1556 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1557 // Add as immediates when possible. Null MCExpr = 0.
1558 if (!Expr)
1559 Inst.addOperand(MCOperand::createImm(0));
1560 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1561 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1562 else
1563 Inst.addOperand(MCOperand::createExpr(Expr));
1564 }
1565
1566 void addRegOperands(MCInst &Inst, unsigned N) const {
1567 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1567, __extension__ __PRETTY_FUNCTION__))
;
1568 Inst.addOperand(MCOperand::createReg(getReg()));
1569 }
1570
1571 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1572 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1572, __extension__ __PRETTY_FUNCTION__))
;
1573 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1574 }
1575
1576 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1577 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1577, __extension__ __PRETTY_FUNCTION__))
;
1578 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1579, __extension__ __PRETTY_FUNCTION__))
1579 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1579, __extension__ __PRETTY_FUNCTION__))
;
1580
1581 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1582 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1583 RI->getEncodingValue(getReg()));
1584
1585 Inst.addOperand(MCOperand::createReg(Reg));
1586 }
1587
1588 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1589 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1589, __extension__ __PRETTY_FUNCTION__))
;
1590 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1591, __extension__ __PRETTY_FUNCTION__))
1591 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1591, __extension__ __PRETTY_FUNCTION__))
;
1592
1593 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1594 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1595 RI->getEncodingValue(getReg()));
1596
1597 Inst.addOperand(MCOperand::createReg(Reg));
1598 }
1599
1600 template <int Width>
1601 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1602 unsigned Base;
1603 switch (Width) {
1604 case 8: Base = AArch64::B0; break;
1605 case 16: Base = AArch64::H0; break;
1606 case 32: Base = AArch64::S0; break;
1607 case 64: Base = AArch64::D0; break;
1608 case 128: Base = AArch64::Q0; break;
1609 default:
1610 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1610)
;
1611 }
1612 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1613 }
1614
1615 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1616 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1616, __extension__ __PRETTY_FUNCTION__))
;
1617 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1618, __extension__ __PRETTY_FUNCTION__))
1618 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1618, __extension__ __PRETTY_FUNCTION__))
;
1619 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1620 }
1621
1622 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1623 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1623, __extension__ __PRETTY_FUNCTION__))
;
1624 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1625, __extension__ __PRETTY_FUNCTION__))
1625 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1625, __extension__ __PRETTY_FUNCTION__))
;
1626 Inst.addOperand(MCOperand::createReg(getReg()));
1627 }
1628
1629 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1630 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1630, __extension__ __PRETTY_FUNCTION__))
;
1631 Inst.addOperand(MCOperand::createReg(getReg()));
1632 }
1633
  // Row selector for the register-list lookup table in
  // addVectorListOperands: 64-bit D lists, 128-bit Q lists, or SVE Z lists.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };
1639
1640 template <VecListIndexType RegTy, unsigned NumRegs>
1641 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1642 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1642, __extension__ __PRETTY_FUNCTION__))
;
1643 static const unsigned FirstRegs[][5] = {
1644 /* DReg */ { AArch64::Q0,
1645 AArch64::D0, AArch64::D0_D1,
1646 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1647 /* QReg */ { AArch64::Q0,
1648 AArch64::Q0, AArch64::Q0_Q1,
1649 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1650 /* ZReg */ { AArch64::Z0,
1651 AArch64::Z0, AArch64::Z0_Z1,
1652 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1653 };
1654
1655 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs
<= 4) && " NumRegs must be <= 4 for ZRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1656, __extension__ __PRETTY_FUNCTION__))
1656 " NumRegs must be <= 4 for ZRegs")(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs
<= 4) && " NumRegs must be <= 4 for ZRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1656, __extension__ __PRETTY_FUNCTION__))
;
1657
1658 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1659 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1660 FirstRegs[(unsigned)RegTy][0]));
1661 }
1662
1663 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1664 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1664, __extension__ __PRETTY_FUNCTION__))
;
1665 unsigned RegMask = getMatrixTileListRegMask();
1666 assert(RegMask <= 0xFF && "Invalid mask!")(static_cast <bool> (RegMask <= 0xFF && "Invalid mask!"
) ? void (0) : __assert_fail ("RegMask <= 0xFF && \"Invalid mask!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1666, __extension__ __PRETTY_FUNCTION__))
;
1667 Inst.addOperand(MCOperand::createImm(RegMask));
1668 }
1669
1670 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1671 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1671, __extension__ __PRETTY_FUNCTION__))
;
1672 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1673 }
1674
1675 template <unsigned ImmIs0, unsigned ImmIs1>
1676 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1677 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1677, __extension__ __PRETTY_FUNCTION__))
;
1678 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")(static_cast <bool> (bool(isExactFPImm<ImmIs0, ImmIs1
>()) && "Invalid operand") ? void (0) : __assert_fail
("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1678, __extension__ __PRETTY_FUNCTION__))
;
1679 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1680 }
1681
1682 void addImmOperands(MCInst &Inst, unsigned N) const {
1683 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1683, __extension__ __PRETTY_FUNCTION__))
;
1684 // If this is a pageoff symrefexpr with an addend, adjust the addend
1685 // to be only the page-offset portion. Otherwise, just add the expr
1686 // as-is.
1687 addExpr(Inst, getImm());
1688 }
1689
1690 template <int Shift>
1691 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1692 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1692, __extension__ __PRETTY_FUNCTION__))
;
1693 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1694 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1695 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1696 } else if (isShiftedImm()) {
1697 addExpr(Inst, getShiftedImmVal());
1698 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1699 } else {
1700 addExpr(Inst, getImm());
1701 Inst.addOperand(MCOperand::createImm(0));
1702 }
1703 }
1704
1705 template <int Shift>
1706 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1707 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1707, __extension__ __PRETTY_FUNCTION__))
;
1708 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1709 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1710 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1711 } else
1712 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1712)
;
1713 }
1714
1715 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1716 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1716, __extension__ __PRETTY_FUNCTION__))
;
1717 Inst.addOperand(MCOperand::createImm(getCondCode()));
1718 }
1719
1720 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1721 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1721, __extension__ __PRETTY_FUNCTION__))
;
1722 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1723 if (!MCE)
1724 addExpr(Inst, getImm());
1725 else
1726 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1727 }
1728
  // ADR targets are byte offsets, so no encoding adjustment is needed;
  // delegate to the generic immediate path (contrast addAdrpLabelOperands,
  // which drops the page bits).
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1732
1733 template<int Scale>
1734 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1735 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1735, __extension__ __PRETTY_FUNCTION__))
;
1736 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1737
1738 if (!MCE) {
1739 Inst.addOperand(MCOperand::createExpr(getImm()));
1740 return;
1741 }
1742 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1743 }
1744
1745 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1746 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1746, __extension__ __PRETTY_FUNCTION__))
;
1747 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1748 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1749 }
1750
1751 template <int Scale>
1752 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1753 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1753, __extension__ __PRETTY_FUNCTION__))
;
1754 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1755 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1756 }
1757
1758 template <typename T>
1759 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1760 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1760, __extension__ __PRETTY_FUNCTION__))
;
1761 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1762 std::make_unsigned_t<T> Val = MCE->getValue();
1763 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1764 Inst.addOperand(MCOperand::createImm(encoding));
1765 }
1766
1767 template <typename T>
1768 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1769 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1769, __extension__ __PRETTY_FUNCTION__))
;
1770 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1771 std::make_unsigned_t<T> Val = ~MCE->getValue();
1772 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1773 Inst.addOperand(MCOperand::createImm(encoding));
1774 }
1775
1776 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1777 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1777, __extension__ __PRETTY_FUNCTION__))
;
1778 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1779 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1780 Inst.addOperand(MCOperand::createImm(encoding));
1781 }
1782
1783 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1784 // Branch operands don't encode the low bits, so shift them off
1785 // here. If it's a label, however, just put it on directly as there's
1786 // not enough information now to do anything.
1787 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1787, __extension__ __PRETTY_FUNCTION__))
;
1788 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1789 if (!MCE) {
1790 addExpr(Inst, getImm());
1791 return;
1792 }
1793 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1793, __extension__ __PRETTY_FUNCTION__))
;
1794 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1795 }
1796
1797 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1798 // Branch operands don't encode the low bits, so shift them off
1799 // here. If it's a label, however, just put it on directly as there's
1800 // not enough information now to do anything.
1801 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1801, __extension__ __PRETTY_FUNCTION__))
;
1802 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1803 if (!MCE) {
1804 addExpr(Inst, getImm());
1805 return;
1806 }
1807 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1807, __extension__ __PRETTY_FUNCTION__))
;
1808 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1809 }
1810
1811 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1812 // Branch operands don't encode the low bits, so shift them off
1813 // here. If it's a label, however, just put it on directly as there's
1814 // not enough information now to do anything.
1815 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1815, __extension__ __PRETTY_FUNCTION__))
;
1816 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1817 if (!MCE) {
1818 addExpr(Inst, getImm());
1819 return;
1820 }
1821 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1821, __extension__ __PRETTY_FUNCTION__))
;
1822 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1823 }
1824
1825 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1826 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1826, __extension__ __PRETTY_FUNCTION__))
;
1827 Inst.addOperand(MCOperand::createImm(
1828 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1829 }
1830
1831 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1832 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1832, __extension__ __PRETTY_FUNCTION__))
;
1833 Inst.addOperand(MCOperand::createImm(getBarrier()));
1834 }
1835
1836 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1837 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1837, __extension__ __PRETTY_FUNCTION__))
;
1838 Inst.addOperand(MCOperand::createImm(getBarrier()));
1839 }
1840
1841 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1842 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1842, __extension__ __PRETTY_FUNCTION__))
;
1843
1844 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1845 }
1846
1847 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1848 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1848, __extension__ __PRETTY_FUNCTION__))
;
1849
1850 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1851 }
1852
1853 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1854 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1854, __extension__ __PRETTY_FUNCTION__))
;
1855
1856 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1857 }
1858
1859 void addSVCROperands(MCInst &Inst, unsigned N) const {
1860 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1860, __extension__ __PRETTY_FUNCTION__))
;
1861
1862 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1863 }
1864
1865 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1866, __extension__ __PRETTY_FUNCTION__))
;
1867
1868 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1869 }
1870
1871 void addSysCROperands(MCInst &Inst, unsigned N) const {
1872 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1872, __extension__ __PRETTY_FUNCTION__))
;
1873 Inst.addOperand(MCOperand::createImm(getSysCR()));
1874 }
1875
1876 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1877 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1877, __extension__ __PRETTY_FUNCTION__))
;
1878 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1879 }
1880
1881 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1882 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1882, __extension__ __PRETTY_FUNCTION__))
;
1883 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1884 }
1885
1886 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1887 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1887, __extension__ __PRETTY_FUNCTION__))
;
1888 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1889 }
1890
1891 void addShifterOperands(MCInst &Inst, unsigned N) const {
1892 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1892, __extension__ __PRETTY_FUNCTION__))
;
1893 unsigned Imm =
1894 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1895 Inst.addOperand(MCOperand::createImm(Imm));
1896 }
1897
1898 void addExtendOperands(MCInst &Inst, unsigned N) const {
1899 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1899, __extension__ __PRETTY_FUNCTION__))
;
1900 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1901 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1902 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1903 Inst.addOperand(MCOperand::createImm(Imm));
1904 }
1905
1906 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1907 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1907, __extension__ __PRETTY_FUNCTION__))
;
1908 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1909 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1910 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1911 Inst.addOperand(MCOperand::createImm(Imm));
1912 }
1913
1914 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1915 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1915, __extension__ __PRETTY_FUNCTION__))
;
1916 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1917 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1918 Inst.addOperand(MCOperand::createImm(IsSigned));
1919 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1920 }
1921
1922 // For 8-bit load/store instructions with a register offset, both the
1923 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1924 // they're disambiguated by whether the shift was explicit or implicit rather
1925 // than its size.
1926 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1927 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1927, __extension__ __PRETTY_FUNCTION__))
;
1928 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1929 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1930 Inst.addOperand(MCOperand::createImm(IsSigned));
1931 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1932 }
1933
1934 template<int Shift>
1935 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1936 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1936, __extension__ __PRETTY_FUNCTION__))
;
1937
1938 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1939 if (CE) {
1940 uint64_t Value = CE->getValue();
1941 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1942 } else {
1943 addExpr(Inst, getImm());
1944 }
1945 }
1946
1947 template<int Shift>
1948 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1949 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1949, __extension__ __PRETTY_FUNCTION__))
;
1950
1951 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1952 uint64_t Value = CE->getValue();
1953 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1954 }
1955
1956 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1957 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1957, __extension__ __PRETTY_FUNCTION__))
;
1958 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1959 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1960 }
1961
1962 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1963 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1963, __extension__ __PRETTY_FUNCTION__))
;
1964 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1965 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1966 }
1967
1968 void print(raw_ostream &OS) const override;
1969
1970 static std::unique_ptr<AArch64Operand>
1971 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1972 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1973 Op->Tok.Data = Str.data();
1974 Op->Tok.Length = Str.size();
1975 Op->Tok.IsSuffix = IsSuffix;
1976 Op->StartLoc = S;
1977 Op->EndLoc = S;
1978 return Op;
1979 }
1980
1981 static std::unique_ptr<AArch64Operand>
1982 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1983 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1984 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1985 unsigned ShiftAmount = 0,
1986 unsigned HasExplicitAmount = false) {
1987 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1988 Op->Reg.RegNum = RegNum;
1989 Op->Reg.Kind = Kind;
1990 Op->Reg.ElementWidth = 0;
1991 Op->Reg.EqualityTy = EqTy;
1992 Op->Reg.ShiftExtend.Type = ExtTy;
1993 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1994 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1995 Op->StartLoc = S;
1996 Op->EndLoc = E;
1997 return Op;
1998 }
1999
2000 static std::unique_ptr<AArch64Operand>
2001 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2002 SMLoc S, SMLoc E, MCContext &Ctx,
2003 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2004 unsigned ShiftAmount = 0,
2005 unsigned HasExplicitAmount = false) {
2006 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2008, __extension__ __PRETTY_FUNCTION__))
2007 Kind == RegKind::SVEPredicateVector) &&(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2008, __extension__ __PRETTY_FUNCTION__))
2008 "Invalid vector kind")(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2008, __extension__ __PRETTY_FUNCTION__))
;
2009 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2010 HasExplicitAmount);
2011 Op->Reg.ElementWidth = ElementWidth;
2012 return Op;
2013 }
2014
2015 static std::unique_ptr<AArch64Operand>
2016 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
2017 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
2018 MCContext &Ctx) {
2019 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2020 Op->VectorList.RegNum = RegNum;
2021 Op->VectorList.Count = Count;
2022 Op->VectorList.NumElements = NumElements;
2023 Op->VectorList.ElementWidth = ElementWidth;
2024 Op->VectorList.RegisterKind = RegisterKind;
2025 Op->StartLoc = S;
2026 Op->EndLoc = E;
2027 return Op;
2028 }
2029
2030 static std::unique_ptr<AArch64Operand>
2031 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2032 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2033 Op->VectorIndex.Val = Idx;
2034 Op->StartLoc = S;
2035 Op->EndLoc = E;
2036 return Op;
2037 }
2038
2039 static std::unique_ptr<AArch64Operand>
2040 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2041 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2042 Op->MatrixTileList.RegMask = RegMask;
2043 Op->StartLoc = S;
2044 Op->EndLoc = E;
2045 return Op;
2046 }
2047
2048 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2049 const unsigned ElementWidth) {
2050 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2051 RegMap = {
2052 {{0, AArch64::ZAB0},
2053 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2054 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2055 {{8, AArch64::ZAB0},
2056 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2057 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2058 {{16, AArch64::ZAH0},
2059 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2060 {{16, AArch64::ZAH1},
2061 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2062 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2063 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2064 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2065 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2066 };
2067
2068 if (ElementWidth == 64)
2069 OutRegs.insert(Reg);
2070 else {
2071 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2072 assert(!Regs.empty() && "Invalid tile or element width!")(static_cast <bool> (!Regs.empty() && "Invalid tile or element width!"
) ? void (0) : __assert_fail ("!Regs.empty() && \"Invalid tile or element width!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2072, __extension__ __PRETTY_FUNCTION__))
;
2073 for (auto OutReg : Regs)
2074 OutRegs.insert(OutReg);
2075 }
2076 }
2077
2078 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2079 SMLoc E, MCContext &Ctx) {
2080 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2081 Op->Imm.Val = Val;
2082 Op->StartLoc = S;
2083 Op->EndLoc = E;
2084 return Op;
2085 }
2086
2087 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2088 unsigned ShiftAmount,
2089 SMLoc S, SMLoc E,
2090 MCContext &Ctx) {
2091 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2092 Op->ShiftedImm .Val = Val;
2093 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2094 Op->StartLoc = S;
2095 Op->EndLoc = E;
2096 return Op;
2097 }
2098
2099 static std::unique_ptr<AArch64Operand>
2100 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2101 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2102 Op->CondCode.Code = Code;
2103 Op->StartLoc = S;
2104 Op->EndLoc = E;
2105 return Op;
2106 }
2107
2108 static std::unique_ptr<AArch64Operand>
2109 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2110 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2111 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2112 Op->FPImm.IsExact = IsExact;
2113 Op->StartLoc = S;
2114 Op->EndLoc = S;
2115 return Op;
2116 }
2117
2118 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2119 StringRef Str,
2120 SMLoc S,
2121 MCContext &Ctx,
2122 bool HasnXSModifier) {
2123 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2124 Op->Barrier.Val = Val;
2125 Op->Barrier.Data = Str.data();
2126 Op->Barrier.Length = Str.size();
2127 Op->Barrier.HasnXSModifier = HasnXSModifier;
2128 Op->StartLoc = S;
2129 Op->EndLoc = S;
2130 return Op;
2131 }
2132
2133 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2134 uint32_t MRSReg,
2135 uint32_t MSRReg,
2136 uint32_t PStateField,
2137 MCContext &Ctx) {
2138 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2139 Op->SysReg.Data = Str.data();
2140 Op->SysReg.Length = Str.size();
2141 Op->SysReg.MRSReg = MRSReg;
2142 Op->SysReg.MSRReg = MSRReg;
2143 Op->SysReg.PStateField = PStateField;
2144 Op->StartLoc = S;
2145 Op->EndLoc = S;
2146 return Op;
2147 }
2148
2149 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2150 SMLoc E, MCContext &Ctx) {
2151 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2152 Op->SysCRImm.Val = Val;
2153 Op->StartLoc = S;
2154 Op->EndLoc = E;
2155 return Op;
2156 }
2157
2158 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2159 StringRef Str,
2160 SMLoc S,
2161 MCContext &Ctx) {
2162 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2163 Op->Prefetch.Val = Val;
2164 Op->Barrier.Data = Str.data();
2165 Op->Barrier.Length = Str.size();
2166 Op->StartLoc = S;
2167 Op->EndLoc = S;
2168 return Op;
2169 }
2170
2171 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2172 StringRef Str,
2173 SMLoc S,
2174 MCContext &Ctx) {
2175 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2176 Op->PSBHint.Val = Val;
2177 Op->PSBHint.Data = Str.data();
2178 Op->PSBHint.Length = Str.size();
2179 Op->StartLoc = S;
2180 Op->EndLoc = S;
2181 return Op;
2182 }
2183
2184 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2185 StringRef Str,
2186 SMLoc S,
2187 MCContext &Ctx) {
2188 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2189 Op->BTIHint.Val = Val | 32;
2190 Op->BTIHint.Data = Str.data();
2191 Op->BTIHint.Length = Str.size();
2192 Op->StartLoc = S;
2193 Op->EndLoc = S;
2194 return Op;
2195 }
2196
2197 static std::unique_ptr<AArch64Operand>
2198 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2199 SMLoc S, SMLoc E, MCContext &Ctx) {
2200 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2201 Op->MatrixReg.RegNum = RegNum;
2202 Op->MatrixReg.ElementWidth = ElementWidth;
2203 Op->MatrixReg.Kind = Kind;
2204 Op->StartLoc = S;
2205 Op->EndLoc = E;
2206 return Op;
2207 }
2208
2209 static std::unique_ptr<AArch64Operand>
2210 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2211 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2212 Op->SVCR.PStateField = PStateField;
2213 Op->SVCR.Data = Str.data();
2214 Op->SVCR.Length = Str.size();
2215 Op->StartLoc = S;
2216 Op->EndLoc = S;
2217 return Op;
2218 }
2219
2220 static std::unique_ptr<AArch64Operand>
2221 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2222 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2223 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2224 Op->ShiftExtend.Type = ShOp;
2225 Op->ShiftExtend.Amount = Val;
2226 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2227 Op->StartLoc = S;
2228 Op->EndLoc = E;
2229 return Op;
2230 }
2231};
2232
2233} // end anonymous namespace.
2234
2235void AArch64Operand::print(raw_ostream &OS) const {
2236 switch (Kind) {
2237 case k_FPImm:
2238 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2239 if (!getFPImmIsExact())
2240 OS << " (inexact)";
2241 OS << ">";
2242 break;
2243 case k_Barrier: {
2244 StringRef Name = getBarrierName();
2245 if (!Name.empty())
2246 OS << "<barrier " << Name << ">";
2247 else
2248 OS << "<barrier invalid #" << getBarrier() << ">";
2249 break;
2250 }
2251 case k_Immediate:
2252 OS << *getImm();
2253 break;
2254 case k_ShiftedImm: {
2255 unsigned Shift = getShiftedImmShift();
2256 OS << "<shiftedimm ";
2257 OS << *getShiftedImmVal();
2258 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2259 break;
2260 }
2261 case k_CondCode:
2262 OS << "<condcode " << getCondCode() << ">";
2263 break;
2264 case k_VectorList: {
2265 OS << "<vectorlist ";
2266 unsigned Reg = getVectorListStart();
2267 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2268 OS << Reg + i << " ";
2269 OS << ">";
2270 break;
2271 }
2272 case k_VectorIndex:
2273 OS << "<vectorindex " << getVectorIndex() << ">";
2274 break;
2275 case k_SysReg:
2276 OS << "<sysreg: " << getSysReg() << '>';
2277 break;
2278 case k_Token:
2279 OS << "'" << getToken() << "'";
2280 break;
2281 case k_SysCR:
2282 OS << "c" << getSysCR();
2283 break;
2284 case k_Prefetch: {
2285 StringRef Name = getPrefetchName();
2286 if (!Name.empty())
2287 OS << "<prfop " << Name << ">";
2288 else
2289 OS << "<prfop invalid #" << getPrefetch() << ">";
2290 break;
2291 }
2292 case k_PSBHint:
2293 OS << getPSBHintName();
2294 break;
2295 case k_BTIHint:
2296 OS << getBTIHintName();
2297 break;
2298 case k_MatrixRegister:
2299 OS << "<matrix " << getMatrixReg() << ">";
2300 break;
2301 case k_MatrixTileList: {
2302 OS << "<matrixlist ";
2303 unsigned RegMask = getMatrixTileListRegMask();
2304 unsigned MaxBits = 8;
2305 for (unsigned I = MaxBits; I > 0; --I)
2306 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2307 OS << '>';
2308 break;
2309 }
2310 case k_SVCR: {
2311 OS << getSVCR();
2312 break;
2313 }
2314 case k_Register:
2315 OS << "<register " << getReg() << ">";
2316 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2317 break;
2318 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2319 case k_ShiftExtend:
2320 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2321 << getShiftExtendAmount();
2322 if (!hasShiftExtendAmount())
2323 OS << "<imp>";
2324 OS << '>';
2325 break;
2326 }
2327}
2328
2329/// @name Auto-generated Match Functions
2330/// {
2331
2332static unsigned MatchRegisterName(StringRef Name);
2333
2334/// }
2335
2336static unsigned MatchNeonVectorRegName(StringRef Name) {
2337 return StringSwitch<unsigned>(Name.lower())
2338 .Case("v0", AArch64::Q0)
2339 .Case("v1", AArch64::Q1)
2340 .Case("v2", AArch64::Q2)
2341 .Case("v3", AArch64::Q3)
2342 .Case("v4", AArch64::Q4)
2343 .Case("v5", AArch64::Q5)
2344 .Case("v6", AArch64::Q6)
2345 .Case("v7", AArch64::Q7)
2346 .Case("v8", AArch64::Q8)
2347 .Case("v9", AArch64::Q9)
2348 .Case("v10", AArch64::Q10)
2349 .Case("v11", AArch64::Q11)
2350 .Case("v12", AArch64::Q12)
2351 .Case("v13", AArch64::Q13)
2352 .Case("v14", AArch64::Q14)
2353 .Case("v15", AArch64::Q15)
2354 .Case("v16", AArch64::Q16)
2355 .Case("v17", AArch64::Q17)
2356 .Case("v18", AArch64::Q18)
2357 .Case("v19", AArch64::Q19)
2358 .Case("v20", AArch64::Q20)
2359 .Case("v21", AArch64::Q21)
2360 .Case("v22", AArch64::Q22)
2361 .Case("v23", AArch64::Q23)
2362 .Case("v24", AArch64::Q24)
2363 .Case("v25", AArch64::Q25)
2364 .Case("v26", AArch64::Q26)
2365 .Case("v27", AArch64::Q27)
2366 .Case("v28", AArch64::Q28)
2367 .Case("v29", AArch64::Q29)
2368 .Case("v30", AArch64::Q30)
2369 .Case("v31", AArch64::Q31)
2370 .Default(0);
2371}
2372
2373/// Returns an optional pair of (#elements, element-width) if Suffix
2374/// is a valid vector kind. Where the number of elements in a vector
2375/// or the vector width is implicit or explicitly unknown (but still a
2376/// valid suffix kind), 0 is used.
2377static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2378 RegKind VectorKind) {
2379 std::pair<int, int> Res = {-1, -1};
2380
2381 switch (VectorKind) {
2382 case RegKind::NeonVector:
2383 Res =
2384 StringSwitch<std::pair<int, int>>(Suffix.lower())
2385 .Case("", {0, 0})
2386 .Case(".1d", {1, 64})
2387 .Case(".1q", {1, 128})
2388 // '.2h' needed for fp16 scalar pairwise reductions
2389 .Case(".2h", {2, 16})
2390 .Case(".2s", {2, 32})
2391 .Case(".2d", {2, 64})
2392 // '.4b' is another special case for the ARMv8.2a dot product
2393 // operand
2394 .Case(".4b", {4, 8})
2395 .Case(".4h", {4, 16})
2396 .Case(".4s", {4, 32})
2397 .Case(".8b", {8, 8})
2398 .Case(".8h", {8, 16})
2399 .Case(".16b", {16, 8})
2400 // Accept the width neutral ones, too, for verbose syntax. If those
2401 // aren't used in the right places, the token operand won't match so
2402 // all will work out.
2403 .Case(".b", {0, 8})
2404 .Case(".h", {0, 16})
2405 .Case(".s", {0, 32})
2406 .Case(".d", {0, 64})
2407 .Default({-1, -1});
2408 break;
2409 case RegKind::SVEPredicateVector:
2410 case RegKind::SVEDataVector:
2411 case RegKind::Matrix:
2412 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2413 .Case("", {0, 0})
2414 .Case(".b", {0, 8})
2415 .Case(".h", {0, 16})
2416 .Case(".s", {0, 32})
2417 .Case(".d", {0, 64})
2418 .Case(".q", {0, 128})
2419 .Default({-1, -1});
2420 break;
2421 default:
2422 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2422)
;
2423 }
2424
2425 if (Res == std::make_pair(-1, -1))
2426 return Optional<std::pair<int, int>>();
2427
2428 return Optional<std::pair<int, int>>(Res);
2429}
2430
2431static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2432 return parseVectorKind(Suffix, VectorKind).hasValue();
2433}
2434
2435static unsigned matchSVEDataVectorRegName(StringRef Name) {
2436 return StringSwitch<unsigned>(Name.lower())
2437 .Case("z0", AArch64::Z0)
2438 .Case("z1", AArch64::Z1)
2439 .Case("z2", AArch64::Z2)
2440 .Case("z3", AArch64::Z3)
2441 .Case("z4", AArch64::Z4)
2442 .Case("z5", AArch64::Z5)
2443 .Case("z6", AArch64::Z6)
2444 .Case("z7", AArch64::Z7)
2445 .Case("z8", AArch64::Z8)
2446 .Case("z9", AArch64::Z9)
2447 .Case("z10", AArch64::Z10)
2448 .Case("z11", AArch64::Z11)
2449 .Case("z12", AArch64::Z12)
2450 .Case("z13", AArch64::Z13)
2451 .Case("z14", AArch64::Z14)
2452 .Case("z15", AArch64::Z15)
2453 .Case("z16", AArch64::Z16)
2454 .Case("z17", AArch64::Z17)
2455 .Case("z18", AArch64::Z18)
2456 .Case("z19", AArch64::Z19)
2457 .Case("z20", AArch64::Z20)
2458 .Case("z21", AArch64::Z21)
2459 .Case("z22", AArch64::Z22)
2460 .Case("z23", AArch64::Z23)
2461 .Case("z24", AArch64::Z24)
2462 .Case("z25", AArch64::Z25)
2463 .Case("z26", AArch64::Z26)
2464 .Case("z27", AArch64::Z27)
2465 .Case("z28", AArch64::Z28)
2466 .Case("z29", AArch64::Z29)
2467 .Case("z30", AArch64::Z30)
2468 .Case("z31", AArch64::Z31)
2469 .Default(0);
2470}
2471
2472static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2473 return StringSwitch<unsigned>(Name.lower())
2474 .Case("p0", AArch64::P0)
2475 .Case("p1", AArch64::P1)
2476 .Case("p2", AArch64::P2)
2477 .Case("p3", AArch64::P3)
2478 .Case("p4", AArch64::P4)
2479 .Case("p5", AArch64::P5)
2480 .Case("p6", AArch64::P6)
2481 .Case("p7", AArch64::P7)
2482 .Case("p8", AArch64::P8)
2483 .Case("p9", AArch64::P9)
2484 .Case("p10", AArch64::P10)
2485 .Case("p11", AArch64::P11)
2486 .Case("p12", AArch64::P12)
2487 .Case("p13", AArch64::P13)
2488 .Case("p14", AArch64::P14)
2489 .Case("p15", AArch64::P15)
2490 .Default(0);
2491}
2492
2493static unsigned matchMatrixTileListRegName(StringRef Name) {
2494 return StringSwitch<unsigned>(Name.lower())
2495 .Case("za0.d", AArch64::ZAD0)
2496 .Case("za1.d", AArch64::ZAD1)
2497 .Case("za2.d", AArch64::ZAD2)
2498 .Case("za3.d", AArch64::ZAD3)
2499 .Case("za4.d", AArch64::ZAD4)
2500 .Case("za5.d", AArch64::ZAD5)
2501 .Case("za6.d", AArch64::ZAD6)
2502 .Case("za7.d", AArch64::ZAD7)
2503 .Case("za0.s", AArch64::ZAS0)
2504 .Case("za1.s", AArch64::ZAS1)
2505 .Case("za2.s", AArch64::ZAS2)
2506 .Case("za3.s", AArch64::ZAS3)
2507 .Case("za0.h", AArch64::ZAH0)
2508 .Case("za1.h", AArch64::ZAH1)
2509 .Case("za0.b", AArch64::ZAB0)
2510 .Default(0);
2511}
2512
2513static unsigned matchMatrixRegName(StringRef Name) {
2514 return StringSwitch<unsigned>(Name.lower())
2515 .Case("za", AArch64::ZA)
2516 .Case("za0.q", AArch64::ZAQ0)
2517 .Case("za1.q", AArch64::ZAQ1)
2518 .Case("za2.q", AArch64::ZAQ2)
2519 .Case("za3.q", AArch64::ZAQ3)
2520 .Case("za4.q", AArch64::ZAQ4)
2521 .Case("za5.q", AArch64::ZAQ5)
2522 .Case("za6.q", AArch64::ZAQ6)
2523 .Case("za7.q", AArch64::ZAQ7)
2524 .Case("za8.q", AArch64::ZAQ8)
2525 .Case("za9.q", AArch64::ZAQ9)
2526 .Case("za10.q", AArch64::ZAQ10)
2527 .Case("za11.q", AArch64::ZAQ11)
2528 .Case("za12.q", AArch64::ZAQ12)
2529 .Case("za13.q", AArch64::ZAQ13)
2530 .Case("za14.q", AArch64::ZAQ14)
2531 .Case("za15.q", AArch64::ZAQ15)
2532 .Case("za0.d", AArch64::ZAD0)
2533 .Case("za1.d", AArch64::ZAD1)
2534 .Case("za2.d", AArch64::ZAD2)
2535 .Case("za3.d", AArch64::ZAD3)
2536 .Case("za4.d", AArch64::ZAD4)
2537 .Case("za5.d", AArch64::ZAD5)
2538 .Case("za6.d", AArch64::ZAD6)
2539 .Case("za7.d", AArch64::ZAD7)
2540 .Case("za0.s", AArch64::ZAS0)
2541 .Case("za1.s", AArch64::ZAS1)
2542 .Case("za2.s", AArch64::ZAS2)
2543 .Case("za3.s", AArch64::ZAS3)
2544 .Case("za0.h", AArch64::ZAH0)
2545 .Case("za1.h", AArch64::ZAH1)
2546 .Case("za0.b", AArch64::ZAB0)
2547 .Case("za0h.q", AArch64::ZAQ0)
2548 .Case("za1h.q", AArch64::ZAQ1)
2549 .Case("za2h.q", AArch64::ZAQ2)
2550 .Case("za3h.q", AArch64::ZAQ3)
2551 .Case("za4h.q", AArch64::ZAQ4)
2552 .Case("za5h.q", AArch64::ZAQ5)
2553 .Case("za6h.q", AArch64::ZAQ6)
2554 .Case("za7h.q", AArch64::ZAQ7)
2555 .Case("za8h.q", AArch64::ZAQ8)
2556 .Case("za9h.q", AArch64::ZAQ9)
2557 .Case("za10h.q", AArch64::ZAQ10)
2558 .Case("za11h.q", AArch64::ZAQ11)
2559 .Case("za12h.q", AArch64::ZAQ12)
2560 .Case("za13h.q", AArch64::ZAQ13)
2561 .Case("za14h.q", AArch64::ZAQ14)
2562 .Case("za15h.q", AArch64::ZAQ15)
2563 .Case("za0h.d", AArch64::ZAD0)
2564 .Case("za1h.d", AArch64::ZAD1)
2565 .Case("za2h.d", AArch64::ZAD2)
2566 .Case("za3h.d", AArch64::ZAD3)
2567 .Case("za4h.d", AArch64::ZAD4)
2568 .Case("za5h.d", AArch64::ZAD5)
2569 .Case("za6h.d", AArch64::ZAD6)
2570 .Case("za7h.d", AArch64::ZAD7)
2571 .Case("za0h.s", AArch64::ZAS0)
2572 .Case("za1h.s", AArch64::ZAS1)
2573 .Case("za2h.s", AArch64::ZAS2)
2574 .Case("za3h.s", AArch64::ZAS3)
2575 .Case("za0h.h", AArch64::ZAH0)
2576 .Case("za1h.h", AArch64::ZAH1)
2577 .Case("za0h.b", AArch64::ZAB0)
2578 .Case("za0v.q", AArch64::ZAQ0)
2579 .Case("za1v.q", AArch64::ZAQ1)
2580 .Case("za2v.q", AArch64::ZAQ2)
2581 .Case("za3v.q", AArch64::ZAQ3)
2582 .Case("za4v.q", AArch64::ZAQ4)
2583 .Case("za5v.q", AArch64::ZAQ5)
2584 .Case("za6v.q", AArch64::ZAQ6)
2585 .Case("za7v.q", AArch64::ZAQ7)
2586 .Case("za8v.q", AArch64::ZAQ8)
2587 .Case("za9v.q", AArch64::ZAQ9)
2588 .Case("za10v.q", AArch64::ZAQ10)
2589 .Case("za11v.q", AArch64::ZAQ11)
2590 .Case("za12v.q", AArch64::ZAQ12)
2591 .Case("za13v.q", AArch64::ZAQ13)
2592 .Case("za14v.q", AArch64::ZAQ14)
2593 .Case("za15v.q", AArch64::ZAQ15)
2594 .Case("za0v.d", AArch64::ZAD0)
2595 .Case("za1v.d", AArch64::ZAD1)
2596 .Case("za2v.d", AArch64::ZAD2)
2597 .Case("za3v.d", AArch64::ZAD3)
2598 .Case("za4v.d", AArch64::ZAD4)
2599 .Case("za5v.d", AArch64::ZAD5)
2600 .Case("za6v.d", AArch64::ZAD6)
2601 .Case("za7v.d", AArch64::ZAD7)
2602 .Case("za0v.s", AArch64::ZAS0)
2603 .Case("za1v.s", AArch64::ZAS1)
2604 .Case("za2v.s", AArch64::ZAS2)
2605 .Case("za3v.s", AArch64::ZAS3)
2606 .Case("za0v.h", AArch64::ZAH0)
2607 .Case("za1v.h", AArch64::ZAH1)
2608 .Case("za0v.b", AArch64::ZAB0)
2609 .Default(0);
2610}
2611
2612bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2613 SMLoc &EndLoc) {
2614 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2615}
2616
2617OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2618 SMLoc &StartLoc,
2619 SMLoc &EndLoc) {
2620 StartLoc = getLoc();
2621 auto Res = tryParseScalarRegister(RegNo);
2622 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2623 return Res;
2624}
2625
2626// Matches a register name or register alias previously defined by '.req'
2627unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2628 RegKind Kind) {
2629 unsigned RegNum = 0;
2630 if ((RegNum = matchSVEDataVectorRegName(Name)))
2631 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2632
2633 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2634 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2635
2636 if ((RegNum = MatchNeonVectorRegName(Name)))
2637 return Kind == RegKind::NeonVector ? RegNum : 0;
2638
2639 if ((RegNum = matchMatrixRegName(Name)))
2640 return Kind == RegKind::Matrix ? RegNum : 0;
2641
2642 // The parsed register must be of RegKind Scalar
2643 if ((RegNum = MatchRegisterName(Name)))
2644 return Kind == RegKind::Scalar ? RegNum : 0;
2645
2646 if (!RegNum) {
2647 // Handle a few common aliases of registers.
2648 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2649 .Case("fp", AArch64::FP)
2650 .Case("lr", AArch64::LR)
2651 .Case("x31", AArch64::XZR)
2652 .Case("w31", AArch64::WZR)
2653 .Default(0))
2654 return Kind == RegKind::Scalar ? RegNum : 0;
2655
2656 // Check for aliases registered via .req. Canonicalize to lower case.
2657 // That's more consistent since register names are case insensitive, and
2658 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2659 auto Entry = RegisterReqs.find(Name.lower());
2660 if (Entry == RegisterReqs.end())
2661 return 0;
2662
2663 // set RegNum if the match is the right kind of register
2664 if (Kind == Entry->getValue().first)
2665 RegNum = Entry->getValue().second;
2666 }
2667 return RegNum;
2668}
2669
2670/// tryParseScalarRegister - Try to parse a register name. The token must be an
2671/// Identifier when called, and if it is a register name the token is eaten and
2672/// the register is added to the operand list.
2673OperandMatchResultTy
2674AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2675 const AsmToken &Tok = getTok();
2676 if (Tok.isNot(AsmToken::Identifier))
2677 return MatchOperand_NoMatch;
2678
2679 std::string lowerCase = Tok.getString().lower();
2680 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2681 if (Reg == 0)
2682 return MatchOperand_NoMatch;
2683
2684 RegNum = Reg;
2685 Lex(); // Eat identifier token.
2686 return MatchOperand_Success;
2687}
2688
2689/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2690OperandMatchResultTy
2691AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2692 SMLoc S = getLoc();
2693
2694 if (getTok().isNot(AsmToken::Identifier)) {
2695 Error(S, "Expected cN operand where 0 <= N <= 15");
2696 return MatchOperand_ParseFail;
2697 }
2698
2699 StringRef Tok = getTok().getIdentifier();
2700 if (Tok[0] != 'c' && Tok[0] != 'C') {
2701 Error(S, "Expected cN operand where 0 <= N <= 15");
2702 return MatchOperand_ParseFail;
2703 }
2704
2705 uint32_t CRNum;
2706 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2707 if (BadNum || CRNum > 15) {
2708 Error(S, "Expected cN operand where 0 <= N <= 15");
2709 return MatchOperand_ParseFail;
2710 }
2711
2712 Lex(); // Eat identifier token.
2713 Operands.push_back(
2714 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2715 return MatchOperand_Success;
2716}
2717
2718/// tryParsePrefetch - Try to parse a prefetch operand.
2719template <bool IsSVEPrefetch>
2720OperandMatchResultTy
2721AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2722 SMLoc S = getLoc();
2723 const AsmToken &Tok = getTok();
2724
2725 auto LookupByName = [](StringRef N) {
2726 if (IsSVEPrefetch) {
2727 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2728 return Optional<unsigned>(Res->Encoding);
2729 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2730 return Optional<unsigned>(Res->Encoding);
2731 return Optional<unsigned>();
2732 };
2733
2734 auto LookupByEncoding = [](unsigned E) {
2735 if (IsSVEPrefetch) {
2736 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2737 return Optional<StringRef>(Res->Name);
2738 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2739 return Optional<StringRef>(Res->Name);
2740 return Optional<StringRef>();
2741 };
2742 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2743
2744 // Either an identifier for named values or a 5-bit immediate.
2745 // Eat optional hash.
2746 if (parseOptionalToken(AsmToken::Hash) ||
2747 Tok.is(AsmToken::Integer)) {
2748 const MCExpr *ImmVal;
2749 if (getParser().parseExpression(ImmVal))
2750 return MatchOperand_ParseFail;
2751
2752 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2753 if (!MCE) {
2754 TokError("immediate value expected for prefetch operand");
2755 return MatchOperand_ParseFail;
2756 }
2757 unsigned prfop = MCE->getValue();
2758 if (prfop > MaxVal) {
2759 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2760 "] expected");
2761 return MatchOperand_ParseFail;
2762 }
2763
2764 auto PRFM = LookupByEncoding(MCE->getValue());
2765 Operands.push_back(AArch64Operand::CreatePrefetch(
2766 prfop, PRFM.getValueOr(""), S, getContext()));
2767 return MatchOperand_Success;
2768 }
2769
2770 if (Tok.isNot(AsmToken::Identifier)) {
2771 TokError("prefetch hint expected");
2772 return MatchOperand_ParseFail;
2773 }
2774
2775 auto PRFM = LookupByName(Tok.getString());
2776 if (!PRFM) {
2777 TokError("prefetch hint expected");
2778 return MatchOperand_ParseFail;
2779 }
2780
2781 Operands.push_back(AArch64Operand::CreatePrefetch(
2782 *PRFM, Tok.getString(), S, getContext()));
2783 Lex(); // Eat identifier token.
2784 return MatchOperand_Success;
2785}
2786
2787/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2788OperandMatchResultTy
2789AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2790 SMLoc S = getLoc();
2791 const AsmToken &Tok = getTok();
2792 if (Tok.isNot(AsmToken::Identifier)) {
2793 TokError("invalid operand for instruction");
2794 return MatchOperand_ParseFail;
2795 }
2796
2797 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2798 if (!PSB) {
2799 TokError("invalid operand for instruction");
2800 return MatchOperand_ParseFail;
2801 }
2802
2803 Operands.push_back(AArch64Operand::CreatePSBHint(
2804 PSB->Encoding, Tok.getString(), S, getContext()));
2805 Lex(); // Eat identifier token.
2806 return MatchOperand_Success;
2807}
2808
2809/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2810OperandMatchResultTy
2811AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2812 SMLoc S = getLoc();
2813 const AsmToken &Tok = getTok();
2814 if (Tok.isNot(AsmToken::Identifier)) {
2815 TokError("invalid operand for instruction");
2816 return MatchOperand_ParseFail;
2817 }
2818
2819 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2820 if (!BTI) {
2821 TokError("invalid operand for instruction");
2822 return MatchOperand_ParseFail;
2823 }
2824
2825 Operands.push_back(AArch64Operand::CreateBTIHint(
2826 BTI->Encoding, Tok.getString(), S, getContext()));
2827 Lex(); // Eat identifier token.
2828 return MatchOperand_Success;
2829}
2830
2831/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2832/// instruction.
2833OperandMatchResultTy
2834AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2835 SMLoc S = getLoc();
2836 const MCExpr *Expr = nullptr;
2837
2838 if (getTok().is(AsmToken::Hash)) {
2839 Lex(); // Eat hash token.
2840 }
2841
2842 if (parseSymbolicImmVal(Expr))
2843 return MatchOperand_ParseFail;
2844
2845 AArch64MCExpr::VariantKind ELFRefKind;
2846 MCSymbolRefExpr::VariantKind DarwinRefKind;
2847 int64_t Addend;
2848 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2849 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2850 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2851 // No modifier was specified at all; this is the syntax for an ELF basic
2852 // ADRP relocation (unfortunately).
2853 Expr =
2854 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2855 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2856 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2857 Addend != 0) {
2858 Error(S, "gotpage label reference not allowed an addend");
2859 return MatchOperand_ParseFail;
2860 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2861 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2862 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2863 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2864 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2865 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
2866 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2867 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2868 // The operand must be an @page or @gotpage qualified symbolref.
2869 Error(S, "page or gotpage label reference expected");
2870 return MatchOperand_ParseFail;
2871 }
2872 }
2873
2874 // We have either a label reference possibly with addend or an immediate. The
2875 // addend is a raw value here. The linker will adjust it to only reference the
2876 // page.
2877 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2878 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2879
2880 return MatchOperand_Success;
2881}
2882
2883/// tryParseAdrLabel - Parse and validate a source label for the ADR
2884/// instruction.
2885OperandMatchResultTy
2886AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2887 SMLoc S = getLoc();
2888 const MCExpr *Expr = nullptr;
2889
2890 // Leave anything with a bracket to the default for SVE
2891 if (getTok().is(AsmToken::LBrac))
2892 return MatchOperand_NoMatch;
2893
2894 if (getTok().is(AsmToken::Hash))
2895 Lex(); // Eat hash token.
2896
2897 if (parseSymbolicImmVal(Expr))
2898 return MatchOperand_ParseFail;
2899
2900 AArch64MCExpr::VariantKind ELFRefKind;
2901 MCSymbolRefExpr::VariantKind DarwinRefKind;
2902 int64_t Addend;
2903 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2904 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2905 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2906 // No modifier was specified at all; this is the syntax for an ELF basic
2907 // ADR relocation (unfortunately).
2908 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2909 } else {
2910 Error(S, "unexpected adr label");
2911 return MatchOperand_ParseFail;
2912 }
2913 }
2914
2915 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2916 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2917 return MatchOperand_Success;
2918}
2919
2920/// tryParseFPImm - A floating point immediate expression operand.
2921template<bool AddFPZeroAsLiteral>
2922OperandMatchResultTy
2923AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2924 SMLoc S = getLoc();
2925
2926 bool Hash = parseOptionalToken(AsmToken::Hash);
2927
2928 // Handle negation, as that still comes through as a separate token.
2929 bool isNegative = parseOptionalToken(AsmToken::Minus);
2930
2931 const AsmToken &Tok = getTok();
2932 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2933 if (!Hash)
2934 return MatchOperand_NoMatch;
2935 TokError("invalid floating point immediate");
2936 return MatchOperand_ParseFail;
2937 }
2938
2939 // Parse hexadecimal representation.
2940 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2941 if (Tok.getIntVal() > 255 || isNegative) {
2942 TokError("encoded floating point value out of range");
2943 return MatchOperand_ParseFail;
2944 }
2945
2946 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2947 Operands.push_back(
2948 AArch64Operand::CreateFPImm(F, true, S, getContext()));
2949 } else {
2950 // Parse FP representation.
2951 APFloat RealVal(APFloat::IEEEdouble());
2952 auto StatusOrErr =
2953 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2954 if (errorToBool(StatusOrErr.takeError())) {
2955 TokError("invalid floating point representation");
2956 return MatchOperand_ParseFail;
2957 }
2958
2959 if (isNegative)
2960 RealVal.changeSign();
2961
2962 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2963 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
2964 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
2965 } else
2966 Operands.push_back(AArch64Operand::CreateFPImm(
2967 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2968 }
2969
2970 Lex(); // Eat the token.
2971
2972 return MatchOperand_Success;
2973}
2974
2975/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2976/// a shift suffix, for example '#1, lsl #12'.
2977OperandMatchResultTy
2978AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2979 SMLoc S = getLoc();
2980
2981 if (getTok().is(AsmToken::Hash))
2982 Lex(); // Eat '#'
2983 else if (getTok().isNot(AsmToken::Integer))
2984 // Operand should start from # or should be integer, emit error otherwise.
2985 return MatchOperand_NoMatch;
2986
2987 const MCExpr *Imm = nullptr;
2988 if (parseSymbolicImmVal(Imm))
2989 return MatchOperand_ParseFail;
2990 else if (getTok().isNot(AsmToken::Comma)) {
2991 Operands.push_back(
2992 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
2993 return MatchOperand_Success;
2994 }
2995
2996 // Eat ','
2997 Lex();
2998
2999 // The optional operand must be "lsl #N" where N is non-negative.
3000 if (!getTok().is(AsmToken::Identifier) ||
3001 !getTok().getIdentifier().equals_insensitive("lsl")) {
3002 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3003 return MatchOperand_ParseFail;
3004 }
3005
3006 // Eat 'lsl'
3007 Lex();
3008
3009 parseOptionalToken(AsmToken::Hash);
3010
3011 if (getTok().isNot(AsmToken::Integer)) {
3012 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3013 return MatchOperand_ParseFail;
3014 }
3015
3016 int64_t ShiftAmount = getTok().getIntVal();
3017
3018 if (ShiftAmount < 0) {
3019 Error(getLoc(), "positive shift amount required");
3020 return MatchOperand_ParseFail;
3021 }
3022 Lex(); // Eat the number
3023
3024 // Just in case the optional lsl #0 is used for immediates other than zero.
3025 if (ShiftAmount == 0 && Imm != nullptr) {
3026 Operands.push_back(
3027 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3028 return MatchOperand_Success;
3029 }
3030
3031 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3032 getLoc(), getContext()));
3033 return MatchOperand_Success;
3034}
3035
3036/// parseCondCodeString - Parse a Condition Code string.
3037AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
3038 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3039 .Case("eq", AArch64CC::EQ)
3040 .Case("ne", AArch64CC::NE)
3041 .Case("cs", AArch64CC::HS)
3042 .Case("hs", AArch64CC::HS)
3043 .Case("cc", AArch64CC::LO)
3044 .Case("lo", AArch64CC::LO)
3045 .Case("mi", AArch64CC::MI)
3046 .Case("pl", AArch64CC::PL)
3047 .Case("vs", AArch64CC::VS)
3048 .Case("vc", AArch64CC::VC)
3049 .Case("hi", AArch64CC::HI)
3050 .Case("ls", AArch64CC::LS)
3051 .Case("ge", AArch64CC::GE)
3052 .Case("lt", AArch64CC::LT)
3053 .Case("gt", AArch64CC::GT)
3054 .Case("le", AArch64CC::LE)
3055 .Case("al", AArch64CC::AL)
3056 .Case("nv", AArch64CC::NV)
3057 .Default(AArch64CC::Invalid);
3058
3059 if (CC == AArch64CC::Invalid &&
3060 getSTI().getFeatureBits()[AArch64::FeatureSVE])
3061 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3062 .Case("none", AArch64CC::EQ)
3063 .Case("any", AArch64CC::NE)
3064 .Case("nlast", AArch64CC::HS)
3065 .Case("last", AArch64CC::LO)
3066 .Case("first", AArch64CC::MI)
3067 .Case("nfrst", AArch64CC::PL)
3068 .Case("pmore", AArch64CC::HI)
3069 .Case("plast", AArch64CC::LS)
3070 .Case("tcont", AArch64CC::GE)
3071 .Case("tstop", AArch64CC::LT)
3072 .Default(AArch64CC::Invalid);
3073
3074 return CC;
3075}
3076
3077/// parseCondCode - Parse a Condition Code operand.
3078bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3079 bool invertCondCode) {
3080 SMLoc S = getLoc();
3081 const AsmToken &Tok = getTok();
3082 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")(static_cast <bool> (Tok.is(AsmToken::Identifier) &&
"Token is not an Identifier") ? void (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3082, __extension__ __PRETTY_FUNCTION__))
;
3083
3084 StringRef Cond = Tok.getString();
3085 AArch64CC::CondCode CC = parseCondCodeString(Cond);
3086 if (CC == AArch64CC::Invalid)
3087 return TokError("invalid condition code");
3088 Lex(); // Eat identifier token.
3089
3090 if (invertCondCode) {
3091 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3092 return TokError("condition codes AL and NV are invalid for this instruction");
3093 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3094 }
3095
3096 Operands.push_back(
3097 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3098 return false;
3099}
3100
3101OperandMatchResultTy
3102AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3103 const AsmToken &Tok = getTok();
3104 SMLoc S = getLoc();
3105
3106 if (Tok.isNot(AsmToken::Identifier)) {
3107 TokError("invalid operand for instruction");
3108 return MatchOperand_ParseFail;
3109 }
3110
3111 unsigned PStateImm = -1;
3112 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3113 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3114 PStateImm = SVCR->Encoding;
3115
3116 Operands.push_back(
3117 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3118 Lex(); // Eat identifier token.
3119 return MatchOperand_Success;
3120}
3121
3122OperandMatchResultTy
3123AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3124 const AsmToken &Tok = getTok();
3125 SMLoc S = getLoc();
3126
3127 StringRef Name = Tok.getString();
3128
3129 if (Name.equals_insensitive("za")) {
3130 Lex(); // eat "za"
3131 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3132 AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
3133 getContext()));
3134 if (getLexer().is(AsmToken::LBrac)) {
3135 // There's no comma after matrix operand, so we can parse the next operand
3136 // immediately.
3137 if (parseOperand(Operands, false, false))
3138 return MatchOperand_NoMatch;
3139 }
3140 return MatchOperand_Success;
3141 }
3142
3143 // Try to parse matrix register.
3144 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3145 if (!Reg)
3146 return MatchOperand_NoMatch;
3147
3148 size_t DotPosition = Name.find('.');
3149 assert(DotPosition != StringRef::npos && "Unexpected register")(static_cast <bool> (DotPosition != StringRef::npos &&
"Unexpected register") ? void (0) : __assert_fail ("DotPosition != StringRef::npos && \"Unexpected register\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3149, __extension__ __PRETTY_FUNCTION__))
;
3150
3151 StringRef Head = Name.take_front(DotPosition);
3152 StringRef Tail = Name.drop_front(DotPosition);
3153 StringRef RowOrColumn = Head.take_back();
3154
3155 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
3156 .Case("h", MatrixKind::Row)
3157 .Case("v", MatrixKind::Col)
3158 .Default(MatrixKind::Tile);
3159
3160 // Next up, parsing the suffix
3161 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3162 if (!KindRes) {
3163 TokError("Expected the register to be followed by element width suffix");
3164 return MatchOperand_ParseFail;
3165 }
3166 unsigned ElementWidth = KindRes->second;
3167
3168 Lex();
3169
3170 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3171 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3172
3173 if (getLexer().is(AsmToken::LBrac)) {
3174 // There's no comma after matrix operand, so we can parse the next operand
3175 // immediately.
3176 if (parseOperand(Operands, false, false))
3177 return MatchOperand_NoMatch;
3178 }
3179 return MatchOperand_Success;
3180}
3181
3182/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3183/// them if present.
3184OperandMatchResultTy
3185AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3186 const AsmToken &Tok = getTok();
3187 std::string LowerID = Tok.getString().lower();
3188 AArch64_AM::ShiftExtendType ShOp =
3189 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3190 .Case("lsl", AArch64_AM::LSL)
3191 .Case("lsr", AArch64_AM::LSR)
3192 .Case("asr", AArch64_AM::ASR)
3193 .Case("ror", AArch64_AM::ROR)
3194 .Case("msl", AArch64_AM::MSL)
3195 .Case("uxtb", AArch64_AM::UXTB)
3196 .Case("uxth", AArch64_AM::UXTH)
3197 .Case("uxtw", AArch64_AM::UXTW)
3198 .Case("uxtx", AArch64_AM::UXTX)
3199 .Case("sxtb", AArch64_AM::SXTB)
3200 .Case("sxth", AArch64_AM::SXTH)
3201 .Case("sxtw", AArch64_AM::SXTW)
3202 .Case("sxtx", AArch64_AM::SXTX)
3203 .Default(AArch64_AM::InvalidShiftExtend);
3204
3205 if (ShOp == AArch64_AM::InvalidShiftExtend)
3206 return MatchOperand_NoMatch;
3207
3208 SMLoc S = Tok.getLoc();
3209 Lex();
3210
3211 bool Hash = parseOptionalToken(AsmToken::Hash);
3212
3213 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3214 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3215 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3216 ShOp == AArch64_AM::MSL) {
3217 // We expect a number here.
3218 TokError("expected #imm after shift specifier");
3219 return MatchOperand_ParseFail;
3220 }
3221
3222 // "extend" type operations don't need an immediate, #0 is implicit.
3223 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3224 Operands.push_back(
3225 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3226 return MatchOperand_Success;
3227 }
3228
3229 // Make sure we do actually have a number, identifier or a parenthesized
3230 // expression.
3231 SMLoc E = getLoc();
3232 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3233 !getTok().is(AsmToken::Identifier)) {
3234 Error(E, "expected integer shift amount");
3235 return MatchOperand_ParseFail;
3236 }
3237
3238 const MCExpr *ImmVal;
3239 if (getParser().parseExpression(ImmVal))
3240 return MatchOperand_ParseFail;
3241
3242 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3243 if (!MCE) {
3244 Error(E, "expected constant '#imm' after shift specifier");
3245 return MatchOperand_ParseFail;
3246 }
3247
3248 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3249 Operands.push_back(AArch64Operand::CreateShiftExtend(
3250 ShOp, MCE->getValue(), true, S, E, getContext()));
3251 return MatchOperand_Success;
3252}
3253
3254static const struct Extension {
3255 const char *Name;
3256 const FeatureBitset Features;
3257} ExtensionMap[] = {
3258 {"crc", {AArch64::FeatureCRC}},
3259 {"sm4", {AArch64::FeatureSM4}},
3260 {"sha3", {AArch64::FeatureSHA3}},
3261 {"sha2", {AArch64::FeatureSHA2}},
3262 {"aes", {AArch64::FeatureAES}},
3263 {"crypto", {AArch64::FeatureCrypto}},
3264 {"fp", {AArch64::FeatureFPARMv8}},
3265 {"simd", {AArch64::FeatureNEON}},
3266 {"ras", {AArch64::FeatureRAS}},
3267 {"lse", {AArch64::FeatureLSE}},
3268 {"predres", {AArch64::FeaturePredRes}},
3269 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3270 {"mte", {AArch64::FeatureMTE}},
3271 {"memtag", {AArch64::FeatureMTE}},
3272 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3273 {"pan", {AArch64::FeaturePAN}},
3274 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3275 {"ccpp", {AArch64::FeatureCCPP}},
3276 {"rcpc", {AArch64::FeatureRCPC}},
3277 {"rng", {AArch64::FeatureRandGen}},
3278 {"sve", {AArch64::FeatureSVE}},
3279 {"sve2", {AArch64::FeatureSVE2}},
3280 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3281 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3282 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3283 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3284 {"ls64", {AArch64::FeatureLS64}},
3285 {"xs", {AArch64::FeatureXS}},
3286 {"pauth", {AArch64::FeaturePAuth}},
3287 {"flagm", {AArch64::FeatureFlagM}},
3288 {"rme", {AArch64::FeatureRME}},
3289 {"sme", {AArch64::FeatureSME}},
3290 {"sme-f64", {AArch64::FeatureSMEF64}},
3291 {"sme-i64", {AArch64::FeatureSMEI64}},
3292 // FIXME: Unsupported extensions
3293 {"lor", {}},
3294 {"rdma", {}},
3295 {"profile", {}},
3296};
3297
3298static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3299 if (FBS[AArch64::HasV8_1aOps])
3300 Str += "ARMv8.1a";
3301 else if (FBS[AArch64::HasV8_2aOps])
3302 Str += "ARMv8.2a";
3303 else if (FBS[AArch64::HasV8_3aOps])
3304 Str += "ARMv8.3a";
3305 else if (FBS[AArch64::HasV8_4aOps])
3306 Str += "ARMv8.4a";
3307 else if (FBS[AArch64::HasV8_5aOps])
3308 Str += "ARMv8.5a";
3309 else if (FBS[AArch64::HasV8_6aOps])
3310 Str += "ARMv8.6a";
3311 else if (FBS[AArch64::HasV8_7aOps])
3312 Str += "ARMv8.7a";
3313 else {
3314 SmallVector<std::string, 2> ExtMatches;
3315 for (const auto& Ext : ExtensionMap) {
3316 // Use & in case multiple features are enabled
3317 if ((FBS & Ext.Features) != FeatureBitset())
3318 ExtMatches.push_back(Ext.Name);
3319 }
3320 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3321 }
3322}
3323
3324void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3325 SMLoc S) {
3326 const uint16_t Op2 = Encoding & 7;
3327 const uint16_t Cm = (Encoding & 0x78) >> 3;
3328 const uint16_t Cn = (Encoding & 0x780) >> 7;
3329 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3330
3331 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3332
3333 Operands.push_back(
3334 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3335 Operands.push_back(
3336 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3337 Operands.push_back(
3338 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3339 Expr = MCConstantExpr::create(Op2, getContext());
3340 Operands.push_back(
3341 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3342}
3343
3344/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3345/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3346bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3347 OperandVector &Operands) {
3348 if (Name.find('.') != StringRef::npos)
3349 return TokError("invalid operand");
3350
3351 Mnemonic = Name;
3352 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3353
3354 const AsmToken &Tok = getTok();
3355 StringRef Op = Tok.getString();
3356 SMLoc S = Tok.getLoc();
3357
3358 if (Mnemonic == "ic") {
3359 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3360 if (!IC)
3361 return TokError("invalid operand for IC instruction");
3362 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3363 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3364 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3365 return TokError(Str);
3366 }
3367 createSysAlias(IC->Encoding, Operands, S);
3368 } else if (Mnemonic == "dc") {
3369 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3370 if (!DC)
3371 return TokError("invalid operand for DC instruction");
3372 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3373 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3374 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3375 return TokError(Str);
3376 }
3377 createSysAlias(DC->Encoding, Operands, S);
3378 } else if (Mnemonic == "at") {
3379 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3380 if (!AT)
3381 return TokError("invalid operand for AT instruction");
3382 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3383 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3384 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3385 return TokError(Str);
3386 }
3387 createSysAlias(AT->Encoding, Operands, S);
3388 } else if (Mnemonic == "tlbi") {
3389 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3390 if (!TLBI)
3391 return TokError("invalid operand for TLBI instruction");
3392 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3393 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3394 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3395 return TokError(Str);
3396 }
3397 createSysAlias(TLBI->Encoding, Operands, S);
3398 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
3399 const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
3400 if (!PRCTX)
3401 return TokError("invalid operand for prediction restriction instruction");
3402 else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
3403 std::string Str(
3404 Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
3405 setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
3406 return TokError(Str);
3407 }
3408 uint16_t PRCTX_Op2 =
3409 Mnemonic == "cfp" ? 4 :
3410 Mnemonic == "dvp" ? 5 :
3411 Mnemonic == "cpp" ? 7 :
3412 0;
3413 assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction")(static_cast <bool> (PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction"
) ? void (0) : __assert_fail ("PRCTX_Op2 && \"Invalid mnemonic for prediction restriction instruction\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3413, __extension__ __PRETTY_FUNCTION__))
;
3414 createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
3415 }
3416
3417 Lex(); // Eat operand.
3418
3419 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3420 bool HasRegister = false;
3421
3422 // Check for the optional register operand.
3423 if (parseOptionalToken(AsmToken::Comma)) {
3424 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3425 return TokError("expected register operand");
3426 HasRegister = true;
3427 }
3428
3429 if (ExpectRegister && !HasRegister)
3430 return TokError("specified " + Mnemonic + " op requires a register");
3431 else if (!ExpectRegister && HasRegister)
3432 return TokError("specified " + Mnemonic + " op does not use a register");
3433
3434 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3435 return true;
3436
3437 return false;
3438}
3439
3440OperandMatchResultTy
3441AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3442 MCAsmParser &Parser = getParser();
3443 const AsmToken &Tok = getTok();
3444
3445 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3446 TokError("'csync' operand expected");
3447 return MatchOperand_ParseFail;
3448 } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3449 // Immediate operand.
3450 const MCExpr *ImmVal;
3451 SMLoc ExprLoc = getLoc();
3452 AsmToken IntTok = Tok;
3453 if (getParser().parseExpression(ImmVal))
3454 return MatchOperand_ParseFail;
3455 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3456 if (!MCE) {
3457 Error(ExprLoc, "immediate value expected for barrier operand");
3458 return MatchOperand_ParseFail;
3459 }
3460 int64_t Value = MCE->getValue();
3461 if (Mnemonic == "dsb" && Value > 15) {
3462 // This case is a no match here, but it might be matched by the nXS
3463 // variant. Deliberately not unlex the optional '#' as it is not necessary
3464 // to characterize an integer immediate.
3465 Parser.getLexer().UnLex(IntTok);
3466 return MatchOperand_NoMatch;
3467 }
3468 if (Value < 0 || Value > 15) {
3469 Error(ExprLoc, "barrier operand out of range");
3470 return MatchOperand_ParseFail;
3471 }
3472 auto DB = AArch64DB::lookupDBByEncoding(Value);
3473 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3474 ExprLoc, getContext(),
3475 false /*hasnXSModifier*/));
3476 return MatchOperand_Success;
3477 }
3478
3479 if (Tok.isNot(AsmToken::Identifier)) {
3480 TokError("invalid operand for instruction");
3481 return MatchOperand_ParseFail;
3482 }
3483
3484 StringRef Operand = Tok.getString();
3485 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3486 auto DB = AArch64DB::lookupDBByName(Operand);
3487 // The only valid named option for ISB is 'sy'
3488 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3489 TokError("'sy' or #imm operand expected");
3490 return MatchOperand_ParseFail;
3491 // The only valid named option for TSB is 'csync'
3492 } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3493 TokError("'csync' operand expected");
3494 return MatchOperand_ParseFail;
3495 } else if (!DB && !TSB) {
3496 if (Mnemonic == "dsb") {
3497 // This case is a no match here, but it might be matched by the nXS
3498 // variant.
3499 return MatchOperand_NoMatch;
3500 }
3501 TokError("invalid barrier option name");
3502 return MatchOperand_ParseFail;
3503 }
3504
3505 Operands.push_back(AArch64Operand::CreateBarrier(
3506 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3507 getContext(), false /*hasnXSModifier*/));
3508 Lex(); // Consume the option
3509
3510 return MatchOperand_Success;
3511}
3512
3513OperandMatchResultTy
3514AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3515 const AsmToken &Tok = getTok();
3516
3517 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands")(static_cast <bool> (Mnemonic == "dsb" && "Instruction does not accept nXS operands"
) ? void (0) : __assert_fail ("Mnemonic == \"dsb\" && \"Instruction does not accept nXS operands\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3517, __extension__ __PRETTY_FUNCTION__))
;
3518 if (Mnemonic != "dsb")
3519 return MatchOperand_ParseFail;
3520
3521 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3522 // Immediate operand.
3523 const MCExpr *ImmVal;
3524 SMLoc ExprLoc = getLoc();
3525 if (getParser().parseExpression(ImmVal))
3526 return MatchOperand_ParseFail;
3527 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3528 if (!MCE) {
3529 Error(ExprLoc, "immediate value expected for barrier operand");
3530 return MatchOperand_ParseFail;
3531 }
3532 int64_t Value = MCE->getValue();
3533 // v8.7-A DSB in the nXS variant accepts only the following immediate
3534 // values: 16, 20, 24, 28.
3535 if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3536 Error(ExprLoc, "barrier operand out of range");
3537 return MatchOperand_ParseFail;
3538 }
3539 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3540 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3541 ExprLoc, getContext(),
3542 true /*hasnXSModifier*/));
3543 return MatchOperand_Success;
3544 }
3545
3546 if (Tok.isNot(AsmToken::Identifier)) {
3547 TokError("invalid operand for instruction");
3548 return MatchOperand_ParseFail;
3549 }
3550
3551 StringRef Operand = Tok.getString();
3552 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3553
3554 if (!DB) {
3555 TokError("invalid barrier option name");
3556 return MatchOperand_ParseFail;
3557 }
3558
3559 Operands.push_back(
3560 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3561 getContext(), true /*hasnXSModifier*/));
3562 Lex(); // Consume the option
3563
3564 return MatchOperand_Success;
3565}
3566
3567OperandMatchResultTy
3568AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3569 const AsmToken &Tok = getTok();
3570
3571 if (Tok.isNot(AsmToken::Identifier))
3572 return MatchOperand_NoMatch;
3573
3574 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3575 return MatchOperand_NoMatch;
3576
3577 int MRSReg, MSRReg;
3578 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3579 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3580 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3581 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3582 } else
3583 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3584
3585 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3586 unsigned PStateImm = -1;
3587 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3588 PStateImm = PState->Encoding;
3589
3590 Operands.push_back(
3591 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3592 PStateImm, getContext()));
3593 Lex(); // Eat identifier
3594
3595 return MatchOperand_Success;
3596}
3597
3598/// tryParseNeonVectorRegister - Parse a vector register operand.
3599bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3600 if (getTok().isNot(AsmToken::Identifier))
3601 return true;
3602
3603 SMLoc S = getLoc();
3604 // Check for a vector register specifier first.
3605 StringRef Kind;
3606 unsigned Reg;
3607 OperandMatchResultTy Res =
3608 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3609 if (Res != MatchOperand_Success)
3610 return true;
3611
3612 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3613 if (!KindRes)
3614 return true;
3615
3616 unsigned ElementWidth = KindRes->second;
3617 Operands.push_back(
3618 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3619 S, getLoc(), getContext()));
3620
3621 // If there was an explicit qualifier, that goes on as a literal text
3622 // operand.
3623 if (!Kind.empty())
3624 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
3625
3626 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3627}
3628
3629OperandMatchResultTy
3630AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3631 SMLoc SIdx = getLoc();
3632 if (parseOptionalToken(AsmToken::LBrac)) {
3633 const MCExpr *ImmVal;
3634 if (getParser().parseExpression(ImmVal))
3635 return MatchOperand_NoMatch;
3636 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3637 if (!MCE) {
3638 TokError("immediate value expected for vector index");
3639 return MatchOperand_ParseFail;;
3640 }
3641
3642 SMLoc E = getLoc();
3643
3644 if (parseToken(AsmToken::RBrac, "']' expected"))
3645 return MatchOperand_ParseFail;;
3646
3647 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3648 E, getContext()));
3649 return MatchOperand_Success;
3650 }
3651
3652 return MatchOperand_NoMatch;
3653}
3654
3655// tryParseVectorRegister - Try to parse a vector register name with
3656// optional kind specifier. If it is a register specifier, eat the token
3657// and return it.
3658OperandMatchResultTy
3659AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3660 RegKind MatchKind) {
3661 const AsmToken &Tok = getTok();
3662
3663 if (Tok.isNot(AsmToken::Identifier))
3664 return MatchOperand_NoMatch;
3665
3666 StringRef Name = Tok.getString();
3667 // If there is a kind specifier, it's separated from the register name by
3668 // a '.'.
3669 size_t Start = 0, Next = Name.find('.');
3670 StringRef Head = Name.slice(Start, Next);
3671 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3672
3673 if (RegNum) {
3674 if (Next != StringRef::npos) {
3675 Kind = Name.slice(Next, StringRef::npos);
3676 if (!isValidVectorKind(Kind, MatchKind)) {
3677 TokError("invalid vector kind qualifier");
3678 return MatchOperand_ParseFail;
3679 }
3680 }
3681 Lex(); // Eat the register token.
3682
3683 Reg = RegNum;
3684 return MatchOperand_Success;
3685 }
3686
3687 return MatchOperand_NoMatch;
3688}
3689
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by an index and/or a '/m' (merging) or '/z' (zeroing)
/// qualifier, which are pushed as separate token operands.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  // An absent or invalid element-kind suffix means this is not a predicate
  // operand of the expected form.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // Indexed predicate, there's no comma so try parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand.
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging? The qualifier is matched case-insensitively.
  auto Pred = getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token (canonicalised to lower case).
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3746
3747/// parseRegister - Parse a register operand.
3748bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3749 // Try for a Neon vector register.
3750 if (!tryParseNeonVectorRegister(Operands))
3751 return false;
3752
3753 // Otherwise try for a scalar register.
3754 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3755 return false;
3756
3757 return true;
3758}
3759
3760bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3761 bool HasELFModifier = false;
3762 AArch64MCExpr::VariantKind RefKind;
3763
3764 if (parseOptionalToken(AsmToken::Colon)) {
3765 HasELFModifier = true;
3766
3767 if (getTok().isNot(AsmToken::Identifier))
3768 return TokError("expect relocation specifier in operand after ':'");
3769
3770 std::string LowerCase = getTok().getIdentifier().lower();
3771 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3772 .Case("lo12", AArch64MCExpr::VK_LO12)
3773 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3774 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3775 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3776 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3777 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3778 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3779 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3780 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3781 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3782 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3783 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3784 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3785 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3786 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3787 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3788 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3789 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3790 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3791 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3792 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3793 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3794 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3795 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3796 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3797 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3798 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3799 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3800 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3801 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3802 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3803 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3804 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3805 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3806 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3807 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3808 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3809 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3810 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3811 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3812 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3813 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3814 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3815 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3816 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3817 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3818 .Default(AArch64MCExpr::VK_INVALID);
3819
3820 if (RefKind == AArch64MCExpr::VK_INVALID)
3821 return TokError("expect relocation specifier in operand after ':'");
3822
3823 Lex(); // Eat identifier
3824
3825 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3826 return true;
3827 }
3828
3829 if (getParser().parseExpression(ImmVal))
3830 return true;
3831
3832 if (HasELFModifier)
3833 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3834
3835 return false;
3836}
3837
/// Parse an SME matrix tile list operand of the form "{}" (empty), "{za}"
/// (everything), or "{ZAn.<kind>, ...}", emitting it as a ZAD-register bit
/// mask.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Parse a single "ZAn.<kind>" tile name, returning its register number and
  // element width through the out-parameters.
  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const Optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list.
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early.
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    // Put the '{' back so another list parser can have a go.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;
  unsigned Count = 1;

  // DRegs accumulates the 64-bit ZAD registers aliased by every listed tile;
  // the final operand is a mask over these.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Out-of-order and duplicate tiles are accepted with a warning only.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
    ++Count;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Encode the aliased ZAD registers as a bit mask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
3948
/// Parse a vector register list "{Vn.k, ...}" or range "{Vn.k - Vm.k}" of up
/// to four registers (with wraparound at 31). On NoMatch the '{' is un-lexed
/// so a different list parser (SVE vs. Neon vs. SME) can try the operand.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function.
  auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // Diagnose when a vector register was expected; "za"-prefixed names are
    // exempt because they belong to SME matrix operands.
    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // Either a "Vn - Vm" range ...
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Register numbers wrap around at 32.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  // ... or a comma-separated list of sequential registers.
  else {
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31).
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
4070
4071/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4072bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4073 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4074 if (ParseRes != MatchOperand_Success)
4075 return true;
4076
4077 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4078}
4079
4080OperandMatchResultTy
4081AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4082 SMLoc StartLoc = getLoc();
4083
4084 unsigned RegNum;
4085 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4086 if (Res != MatchOperand_Success)
4087 return Res;
4088
4089 if (!parseOptionalToken(AsmToken::Comma)) {
4090 Operands.push_back(AArch64Operand::CreateReg(
4091 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4092 return MatchOperand_Success;
4093 }
4094
4095 parseOptionalToken(AsmToken::Hash);
4096
4097 if (getTok().isNot(AsmToken::Integer)) {
4098 Error(getLoc(), "index must be absent or #0");
4099 return MatchOperand_ParseFail;
4100 }
4101
4102 const MCExpr *ImmVal;
4103 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4104 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4105 Error(getLoc(), "index must be absent or #0");
4106 return MatchOperand_ParseFail;
4107 }
4108
4109 Operands.push_back(AArch64Operand::CreateReg(
4110 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4111 return MatchOperand_Success;
4112}
4113
4114template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4115OperandMatchResultTy
4116AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4117 SMLoc StartLoc = getLoc();
4118
4119 unsigned RegNum;
4120 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4121 if (Res != MatchOperand_Success)
4122 return Res;
4123
4124 // No shift/extend is the default.
4125 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4126 Operands.push_back(AArch64Operand::CreateReg(
4127 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4128 return MatchOperand_Success;
4129 }
4130
4131 // Eat the comma
4132 Lex();
4133
4134 // Match the shift
4135 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4136 Res = tryParseOptionalShiftExtend(ExtOpnd);
4137 if (Res != MatchOperand_Success)
4138 return Res;
4139
4140 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4141 Operands.push_back(AArch64Operand::CreateReg(
4142 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4143 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4144 Ext->hasShiftExtendAmount()));
4145
4146 return MatchOperand_Success;
4147}
4148
4149bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4150 MCAsmParser &Parser = getParser();
4151
4152 // Some SVE instructions have a decoration after the immediate, i.e.
4153 // "mul vl". We parse them here and add tokens, which must be present in the
4154 // asm string in the tablegen instruction.
4155 bool NextIsVL =
4156 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4157 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4158 if (!getTok().getString().equals_insensitive("mul") ||
4159 !(NextIsVL || NextIsHash))
4160 return true;
4161
4162 Operands.push_back(
4163 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4164 Lex(); // Eat the "mul"
4165
4166 if (NextIsVL) {
4167 Operands.push_back(
4168 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4169 Lex(); // Eat the "vl"
4170 return false;
4171 }
4172
4173 if (NextIsHash) {
4174 Lex(); // Eat the #
4175 SMLoc S = getLoc();
4176
4177 // Parse immediate operand.
4178 const MCExpr *ImmVal;
4179 if (!Parser.parseExpression(ImmVal))
4180 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4181 Operands.push_back(AArch64Operand::CreateImm(
4182 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4183 getContext()));
4184 return MatchOperand_Success;
4185 }
4186 }
4187
4188 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4189}
4190
4191bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4192 auto Tok = getTok();
4193 if (Tok.isNot(AsmToken::Identifier))
4194 return true;
4195
4196 auto Keyword = Tok.getString();
4197 Keyword = StringSwitch<StringRef>(Keyword.lower())
4198 .Case("sm", "sm")
4199 .Case("za", "za")
4200 .Default(Keyword);
4201 Operands.push_back(
4202 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4203
4204 Lex();
4205 return false;
4206}
4207
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First try the tablegen-generated, per-operand-class custom parsers.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else: treat as a (possibly :modifier:-prefixed) expression
    // immediate.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is an "smstart" or "smstop" instruction, parse its special
    // keyword operand as an identifier.
    if (Mnemonic == "smstart" || Mnemonic == "smstop")
      return parseKeywordOperand(Operands);

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign.
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emitted as two tokens ("#0" and ".0") to match the asm string.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalise to a 16-bit payload plus an LSL #16*k shift amount.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4396
4397bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4398 const MCExpr *Expr = nullptr;
4399 SMLoc L = getLoc();
4400 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4401 return true;
4402 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4403 if (check(!Value, L, "expected constant expression"))
4404 return true;
4405 Out = Value->getValue();
4406 return false;
4407}
4408
4409bool AArch64AsmParser::parseComma() {
4410 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4411 return true;
4412 // Eat the comma
4413 Lex();
4414 return false;
4415}
4416
/// Parse a register and check that it lies in [First, Last], returning its
/// offset from Base in Out. For X-register ranges ending at FP or LR, those
/// aliases are mapped to 29 and 30 by hand, since FP/LR do not follow X28
/// contiguously in the register enum.
bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
                                            unsigned First, unsigned Last) {
  unsigned Reg;
  SMLoc Start, End;
  if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
    return true;

  // Special handling for FP and LR; they aren't linearly after x28 in
  // the registers enum.
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      // Range ends at FP: the linear check only covers up to X28.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      }
    }
    if (Last == AArch64::LR) {
      // Range ends at LR: both FP (29) and LR (30) are accepted.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      } else if (Reg == AArch64::LR) {
        Out = 30;
        return false;
      }
    }
  }

  if (check(Reg < First || Reg > RangeEnd, Start,
            Twine("expected register in range ") +
                AArch64InstPrinter::getRegisterName(First) + " to " +
                AArch64InstPrinter::getRegisterName(Last)))
    return true;
  Out = Reg - Base;
  return false;
}
4455
4456bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4457 const MCParsedAsmOperand &Op2) const {
4458 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4459 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4460 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4461 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4462 return MCTargetAsmParser::regsEqual(Op1, Op2);
4463
4464 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4465, __extension__ __PRETTY_FUNCTION__))
4465 "Testing equality of non-scalar registers not supported")(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4465, __extension__ __PRETTY_FUNCTION__))
;
4466
4467 // Check if a registers match their sub/super register classes.
4468 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4469 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4470 if (AOp1.getRegEqualityTy() == EqualsSubReg)
4471 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4472 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4473 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4474 if (AOp2.getRegEqualityTy() == EqualsSubReg)
4475 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4476
4477 return false;
4478}
4479
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// On success the mnemonic (split at '.' into a head token plus suffix
/// tokens) and all parsed operands are appended to \p Operands.
/// \returns true on any parse error (a diagnostic has been emitted).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the legacy shorthand conditional-branch spellings (e.g.
  // "beq") to the dotted form ("b.eq") that the rest of this function and the
  // generated matcher expect.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  // The head of the mnemonic becomes the first (token) operand; it is also
  // remembered in Mnemonic for use by later operand-parsing decisions.
  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    // The '.' separator and the condition code become distinct suffix
    // operands so the matcher can handle "b.<cond>" uniformly.
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    // Note: unlike the branch case above, the '.' is kept as part of each
    // suffix token here.
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    // N counts operands 1-based so the condCode*Operand flags above can be
    // matched against the operand position currently being parsed.
    unsigned N = 1;
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      // + An RBrac will end an address for load/store/prefetch
      // + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4619
4620static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4621 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(static_cast <bool> ((ZReg >= AArch64::Z0) &&
(ZReg <= AArch64::Z31)) ? void (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4621, __extension__ __PRETTY_FUNCTION__))
;
4622 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4623 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4624 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4625 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4626 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4627 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4628}
4629
4630// FIXME: This entire function is a giant hack to provide us with decent
4631// operand range validation/diagnostics until TableGen/MC can be extended
4632// to support autogeneration of this kind of validation.
4633bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4634 SmallVectorImpl<SMLoc> &Loc) {
4635 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4636 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4637
4638 // A prefix only applies to the instruction following it. Here we extract
4639 // prefix information for the next instruction before validating the current
4640 // one so that in the case of failure we don't erronously continue using the
4641 // current prefix.
4642 PrefixInfo Prefix = NextPrefix;
4643 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4644
4645 // Before validating the instruction in isolation we run through the rules
4646 // applicable when it follows a prefix instruction.
4647 // NOTE: brk & hlt can be prefixed but require no additional validation.
4648 if (Prefix.isActive() &&
4649 (Inst.getOpcode() != AArch64::BRK) &&
4650 (Inst.getOpcode() != AArch64::HLT)) {
4651
4652 // Prefixed intructions must have a destructive operand.
4653 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4654 AArch64::NotDestructive)
4655 return Error(IDLoc, "instruction is unpredictable when following a"
4656 " movprfx, suggest replacing movprfx with mov");
4657
4658 // Destination operands must match.
4659 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4660 return Error(Loc[0], "instruction is unpredictable when following a"
4661 " movprfx writing to a different destination");
4662
4663 // Destination operand must not be used in any other location.
4664 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4665 if (Inst.getOperand(i).isReg() &&
4666 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4667 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4668 return Error(Loc[0], "instruction is unpredictable when following a"
4669 " movprfx and destination also used as non-destructive"
4670 " source");
4671 }
4672
4673 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4674 if (Prefix.isPredicated()) {
4675 int PgIdx = -1;
4676
4677 // Find the instructions general predicate.
4678 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4679 if (Inst.getOperand(i).isReg() &&
4680 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4681 PgIdx = i;
4682 break;
4683 }
4684
4685 // Instruction must be predicated if the movprfx is predicated.
4686 if (PgIdx == -1 ||
4687 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4688 return Error(IDLoc, "instruction is unpredictable when following a"
4689 " predicated movprfx, suggest using unpredicated movprfx");
4690
4691 // Instruction must use same general predicate as the movprfx.
4692 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4693 return Error(IDLoc, "instruction is unpredictable when following a"
4694 " predicated movprfx using a different general predicate");
4695
4696 // Instruction element type must match the movprfx.
4697 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4698 return Error(IDLoc, "instruction is unpredictable when following a"
4699 " predicated movprfx with a different element size");
4700 }
4701 }
4702
4703 // Check for indexed addressing modes w/ the base register being the
4704 // same as a destination/source register or pair load where
4705 // the Rt == Rt2. All of those are undefined behaviour.
4706 switch (Inst.getOpcode()) {
4707 case AArch64::LDPSWpre:
4708 case AArch64::LDPWpost:
4709 case AArch64::LDPWpre:
4710 case AArch64::LDPXpost:
4711 case AArch64::LDPXpre: {
4712 unsigned Rt = Inst.getOperand(1).getReg();
4713 unsigned Rt2 = Inst.getOperand(2).getReg();
4714 unsigned Rn = Inst.getOperand(3).getReg();
4715 if (RI->isSubRegisterEq(Rn, Rt))
4716 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4717 "is also a destination");
4718 if (RI->isSubRegisterEq(Rn, Rt2))
4719 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4720 "is also a destination");
4721 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4722 }
4723 case AArch64::LDPDi:
4724 case AArch64::LDPQi:
4725 case AArch64::LDPSi:
4726 case AArch64::LDPSWi:
4727 case AArch64::LDPWi:
4728 case AArch64::LDPXi: {
4729 unsigned Rt = Inst.getOperand(0).getReg();
4730 unsigned Rt2 = Inst.getOperand(1).getReg();
4731 if (Rt == Rt2)
4732 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4733 break;
4734 }
4735 case AArch64::LDPDpost:
4736 case AArch64::LDPDpre:
4737 case AArch64::LDPQpost:
4738 case AArch64::LDPQpre:
4739 case AArch64::LDPSpost:
4740 case AArch64::LDPSpre:
4741 case AArch64::LDPSWpost: {
4742 unsigned Rt = Inst.getOperand(1).getReg();
4743 unsigned Rt2 = Inst.getOperand(2).getReg();
4744 if (Rt == Rt2)
4745 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4746 break;
4747 }
4748 case AArch64::STPDpost:
4749 case AArch64::STPDpre:
4750 case AArch64::STPQpost:
4751 case AArch64::STPQpre:
4752 case AArch64::STPSpost:
4753 case AArch64::STPSpre:
4754 case AArch64::STPWpost:
4755 case AArch64::STPWpre:
4756 case AArch64::STPXpost:
4757 case AArch64::STPXpre: {
4758 unsigned Rt = Inst.getOperand(1).getReg();
4759 unsigned Rt2 = Inst.getOperand(2).getReg();
4760 unsigned Rn = Inst.getOperand(3).getReg();
4761 if (RI->isSubRegisterEq(Rn, Rt))
4762 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4763 "is also a source");
4764 if (RI->isSubRegisterEq(Rn, Rt2))
4765 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4766 "is also a source");
4767 break;
4768 }
4769 case AArch64::LDRBBpre:
4770 case AArch64::LDRBpre:
4771 case AArch64::LDRHHpre:
4772 case AArch64::LDRHpre:
4773 case AArch64::LDRSBWpre:
4774 case AArch64::LDRSBXpre:
4775 case AArch64::LDRSHWpre:
4776 case AArch64::LDRSHXpre:
4777 case AArch64::LDRSWpre:
4778 case AArch64::LDRWpre:
4779 case AArch64::LDRXpre:
4780 case AArch64::LDRBBpost:
4781 case AArch64::LDRBpost:
4782 case AArch64::LDRHHpost:
4783 case AArch64::LDRHpost:
4784 case AArch64::LDRSBWpost:
4785 case AArch64::LDRSBXpost:
4786 case AArch64::LDRSHWpost:
4787 case AArch64::LDRSHXpost:
4788 case AArch64::LDRSWpost:
4789 case AArch64::LDRWpost:
4790 case AArch64::LDRXpost: {
4791 unsigned Rt = Inst.getOperand(1).getReg();
4792 unsigned Rn = Inst.getOperand(2).getReg();
4793 if (RI->isSubRegisterEq(Rn, Rt))
4794 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4795 "is also a source");
4796 break;
4797 }
4798 case AArch64::STRBBpost:
4799 case AArch64::STRBpost:
4800 case AArch64::STRHHpost:
4801 case AArch64::STRHpost:
4802 case AArch64::STRWpost:
4803 case AArch64::STRXpost:
4804 case AArch64::STRBBpre:
4805 case AArch64::STRBpre:
4806 case AArch64::STRHHpre:
4807 case AArch64::STRHpre:
4808 case AArch64::STRWpre:
4809 case AArch64::STRXpre: {
4810 unsigned Rt = Inst.getOperand(1).getReg();
4811 unsigned Rn = Inst.getOperand(2).getReg();
4812 if (RI->isSubRegisterEq(Rn, Rt))
4813 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4814 "is also a source");
4815 break;
4816 }
4817 case AArch64::STXRB:
4818 case AArch64::STXRH:
4819 case AArch64::STXRW:
4820 case AArch64::STXRX:
4821 case AArch64::STLXRB:
4822 case AArch64::STLXRH:
4823 case AArch64::STLXRW:
4824 case AArch64::STLXRX: {
4825 unsigned Rs = Inst.getOperand(0).getReg();
4826 unsigned Rt = Inst.getOperand(1).getReg();
4827 unsigned Rn = Inst.getOperand(2).getReg();
4828 if (RI->isSubRegisterEq(Rt, Rs) ||
4829 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4830 return Error(Loc[0],
4831 "unpredictable STXR instruction, status is also a source");
4832 break;
4833 }
4834 case AArch64::STXPW:
4835 case AArch64::STXPX:
4836 case AArch64::STLXPW:
4837 case AArch64::STLXPX: {
4838 unsigned Rs = Inst.getOperand(0).getReg();
4839 unsigned Rt1 = Inst.getOperand(1).getReg();
4840 unsigned Rt2 = Inst.getOperand(2).getReg();
4841 unsigned Rn = Inst.getOperand(3).getReg();
4842 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4843 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4844 return Error(Loc[0],
4845 "unpredictable STXP instruction, status is also a source");
4846 break;
4847 }
4848 case AArch64::LDRABwriteback:
4849 case AArch64::LDRAAwriteback: {
4850 unsigned Xt = Inst.getOperand(0).getReg();
4851 unsigned Xn = Inst.getOperand(1).getReg();
4852 if (Xt == Xn)
4853 return Error(Loc[0],
4854 "unpredictable LDRA instruction, writeback base"
4855 " is also a destination");
4856 break;
4857 }
4858 }
4859
4860
4861 // Now check immediate ranges. Separate from the above as there is overlap
4862 // in the instructions being checked and this keeps the nested conditionals
4863 // to a minimum.
4864 switch (Inst.getOpcode()) {
4865 case AArch64::ADDSWri:
4866 case AArch64::ADDSXri:
4867 case AArch64::ADDWri:
4868 case AArch64::ADDXri:
4869 case AArch64::SUBSWri:
4870 case AArch64::SUBSXri:
4871 case AArch64::SUBWri:
4872 case AArch64::SUBXri: {
4873 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4874 // some slight duplication here.
4875 if (Inst.getOperand(2).isExpr()) {
4876 const MCExpr *Expr = Inst.getOperand(2).getExpr();
4877 AArch64MCExpr::VariantKind ELFRefKind;
4878 MCSymbolRefExpr::VariantKind DarwinRefKind;
4879 int64_t Addend;
4880 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4881
4882 // Only allow these with ADDXri.
4883 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4884 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4885 Inst.getOpcode() == AArch64::ADDXri)
4886 return false;
4887
4888 // Only allow these with ADDXri/ADDWri
4889 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4890 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4891 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4892 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4893 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4894 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4895 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4896 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4897 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4898 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4899 (Inst.getOpcode() == AArch64::ADDXri ||
4900 Inst.getOpcode() == AArch64::ADDWri))
4901 return false;
4902
4903 // Don't allow symbol refs in the immediate field otherwise
4904 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4905 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4906 // 'cmp w0, 'borked')
4907 return Error(Loc.back(), "invalid immediate expression");
4908 }
4909 // We don't validate more complex expressions here
4910 }
4911 return false;
4912 }
4913 default:
4914 return false;
4915 }
4916}
4917
4918static std::string AArch64MnemonicSpellCheck(StringRef S,
4919 const FeatureBitset &FBS,
4920 unsigned VariantID = 0);
4921
4922bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4923 uint64_t ErrorInfo,
4924 OperandVector &Operands) {
4925 switch (ErrCode) {
4926 case Match_InvalidTiedOperand: {
4927 RegConstraintEqualityTy EqTy =
4928 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4929 .getRegEqualityTy();
4930 switch (EqTy) {
4931 case RegConstraintEqualityTy::EqualsSubReg:
4932 return Error(Loc, "operand must be 64-bit form of destination register");
4933 case RegConstraintEqualityTy::EqualsSuperReg:
4934 return Error(Loc, "operand must be 32-bit form of destination register");
4935 case RegConstraintEqualityTy::EqualsReg:
4936 return Error(Loc, "operand must match destination register");
4937 }
4938 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4938)
;
4939 }
4940 case Match_MissingFeature:
4941 return Error(Loc,
4942 "instruction requires a CPU feature not currently enabled");
4943 case Match_InvalidOperand:
4944 return Error(Loc, "invalid operand for instruction");
4945 case Match_InvalidSuffix:
4946 return Error(Loc, "invalid type suffix for instruction");
4947 case Match_InvalidCondCode:
4948 return Error(Loc, "expected AArch64 condition code");
4949 case Match_AddSubRegExtendSmall:
4950 return Error(Loc,
4951 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4952 case Match_AddSubRegExtendLarge:
4953 return Error(Loc,
4954 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4955 case Match_AddSubSecondSource:
4956 return Error(Loc,
4957 "expected compatible register, symbol or integer in range [0, 4095]");
4958 case Match_LogicalSecondSource:
4959 return Error(Loc, "expected compatible register or logical immediate");
4960 case Match_InvalidMovImm32Shift:
4961 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4962 case Match_InvalidMovImm64Shift:
4963 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4964 case Match_AddSubRegShift32:
4965 return Error(Loc,
4966 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4967 case Match_AddSubRegShift64:
4968 return Error(Loc,
4969 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4970 case Match_InvalidFPImm:
4971 return Error(Loc,
4972 "expected compatible register or floating-point constant");
4973 case Match_InvalidMemoryIndexedSImm6:
4974 return Error(Loc, "index must be an integer in range [-32, 31].");
4975 case Match_InvalidMemoryIndexedSImm5:
4976 return Error(Loc, "index must be an integer in range [-16, 15].");
4977 case Match_InvalidMemoryIndexed1SImm4:
4978 return Error(Loc, "index must be an integer in range [-8, 7].");
4979 case Match_InvalidMemoryIndexed2SImm4:
4980 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4981 case Match_InvalidMemoryIndexed3SImm4:
4982 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4983 case Match_InvalidMemoryIndexed4SImm4:
4984 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4985 case Match_InvalidMemoryIndexed16SImm4:
4986 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4987 case Match_InvalidMemoryIndexed32SImm4:
4988 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4989 case Match_InvalidMemoryIndexed1SImm6:
4990 return Error(Loc, "index must be an integer in range [-32, 31].");
4991 case Match_InvalidMemoryIndexedSImm8:
4992 return Error(Loc, "index must be an integer in range [-128, 127].");
4993 case Match_InvalidMemoryIndexedSImm9:
4994 return Error(Loc, "index must be an integer in range [-256, 255].");
4995 case Match_InvalidMemoryIndexed16SImm9:
4996 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4997 case Match_InvalidMemoryIndexed8SImm10:
4998 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4999 case Match_InvalidMemoryIndexed4SImm7:
5000 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5001 case Match_InvalidMemoryIndexed8SImm7:
5002 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5003 case Match_InvalidMemoryIndexed16SImm7:
5004 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5005 case Match_InvalidMemoryIndexed8UImm5:
5006 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5007 case Match_InvalidMemoryIndexed4UImm5:
5008 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5009 case Match_InvalidMemoryIndexed2UImm5:
5010 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5011 case Match_InvalidMemoryIndexed8UImm6:
5012 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5013 case Match_InvalidMemoryIndexed16UImm6:
5014 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5015 case Match_InvalidMemoryIndexed4UImm6:
5016 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5017 case Match_InvalidMemoryIndexed2UImm6:
5018 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5019 case Match_InvalidMemoryIndexed1UImm6:
5020 return Error(Loc, "index must be in range [0, 63].");
5021 case Match_InvalidMemoryWExtend8:
5022 return Error(Loc,
5023 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5024 case Match_InvalidMemoryWExtend16:
5025 return Error(Loc,
5026 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5027 case Match_InvalidMemoryWExtend32:
5028 return Error(Loc,
5029 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5030 case Match_InvalidMemoryWExtend64:
5031 return Error(Loc,
5032 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5033 case Match_InvalidMemoryWExtend128:
5034 return Error(Loc,
5035 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5036 case Match_InvalidMemoryXExtend8:
5037 return Error(Loc,
5038 "expected 'lsl' or 'sxtx' with optional shift of #0");
5039 case Match_InvalidMemoryXExtend16:
5040 return Error(Loc,
5041 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5042 case Match_InvalidMemoryXExtend32:
5043 return Error(Loc,
5044 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5045 case Match_InvalidMemoryXExtend64:
5046 return Error(Loc,
5047 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5048 case Match_InvalidMemoryXExtend128:
5049 return Error(Loc,
5050 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5051 case Match_InvalidMemoryIndexed1:
5052 return Error(Loc, "index must be an integer in range [0, 4095].");
5053 case Match_InvalidMemoryIndexed2:
5054 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5055 case Match_InvalidMemoryIndexed4:
5056 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5057 case Match_InvalidMemoryIndexed8:
5058 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5059 case Match_InvalidMemoryIndexed16:
5060 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5061 case Match_InvalidImm0_1:
5062 return Error(Loc, "immediate must be an integer in range [0, 1].");
5063 case Match_InvalidImm0_3:
5064 return Error(Loc, "immediate must be an integer in range [0, 3].");
5065 case Match_InvalidImm0_7:
5066 return Error(Loc, "immediate must be an integer in range [0, 7].");
5067 case Match_InvalidImm0_15:
5068 return Error(Loc, "immediate must be an integer in range [0, 15].");
5069 case Match_InvalidImm0_31:
5070 return Error(Loc, "immediate must be an integer in range [0, 31].");
5071 case Match_InvalidImm0_63:
5072 return Error(Loc, "immediate must be an integer in range [0, 63].");
5073 case Match_InvalidImm0_127:
5074 return Error(Loc, "immediate must be an integer in range [0, 127].");
5075 case Match_InvalidImm0_255:
5076 return Error(Loc, "immediate must be an integer in range [0, 255].");
5077 case Match_InvalidImm0_65535:
5078 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5079 case Match_InvalidImm1_8:
5080 return Error(Loc, "immediate must be an integer in range [1, 8].");
5081 case Match_InvalidImm1_16:
5082 return Error(Loc, "immediate must be an integer in range [1, 16].");
5083 case Match_InvalidImm1_32:
5084 return Error(Loc, "immediate must be an integer in range [1, 32].");
5085 case Match_InvalidImm1_64:
5086 return Error(Loc, "immediate must be an integer in range [1, 64].");
5087 case Match_InvalidSVEAddSubImm8:
5088 return Error(Loc, "immediate must be an integer in range [0, 255]"
5089 " with a shift amount of 0");
5090 case Match_InvalidSVEAddSubImm16:
5091 case Match_InvalidSVEAddSubImm32:
5092 case Match_InvalidSVEAddSubImm64:
5093 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5094 "multiple of 256 in range [256, 65280]");
5095 case Match_InvalidSVECpyImm8:
5096 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5097 " with a shift amount of 0");
5098 case Match_InvalidSVECpyImm16:
5099 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5100 "multiple of 256 in range [-32768, 65280]");
5101 case Match_InvalidSVECpyImm32:
5102 case Match_InvalidSVECpyImm64:
5103 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5104 "multiple of 256 in range [-32768, 32512]");
5105 case Match_InvalidIndexRange1_1:
5106 return Error(Loc, "expected lane specifier '[1]'");
5107 case Match_InvalidIndexRange0_15:
5108 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5109 case Match_InvalidIndexRange0_7:
5110 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5111 case Match_InvalidIndexRange0_3:
5112 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5113 case Match_InvalidIndexRange0_1:
5114 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5115 case Match_InvalidSVEIndexRange0_63:
5116 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5117 case Match_InvalidSVEIndexRange0_31:
5118 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5119 case Match_InvalidSVEIndexRange0_15:
5120 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5121 case Match_InvalidSVEIndexRange0_7:
5122 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5123 case Match_InvalidSVEIndexRange0_3:
5124 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5125 case Match_InvalidLabel:
5126 return Error(Loc, "expected label or encodable integer pc offset");
5127 case Match_MRS:
5128 return Error(Loc, "expected readable system register");
5129 case Match_MSR:
5130 case Match_InvalidSVCR:
5131 return Error(Loc, "expected writable system register or pstate");
5132 case Match_InvalidComplexRotationEven:
5133 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5134 case Match_InvalidComplexRotationOdd:
5135 return Error(Loc, "complex rotation must be 90 or 270.");
5136 case Match_MnemonicFail: {
5137 std::string Suggestion = AArch64MnemonicSpellCheck(
5138 ((AArch64Operand &)*Operands[0]).getToken(),
5139 ComputeAvailableFeatures(STI->getFeatureBits()));
5140 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5141 }
5142 case Match_InvalidGPR64shifted8:
5143 return Error(Loc, "register must be x0..x30 or xzr, without shift");
5144 case Match_InvalidGPR64shifted16:
5145 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5146 case Match_InvalidGPR64shifted32:
5147 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5148 case Match_InvalidGPR64shifted64:
5149 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5150 case Match_InvalidGPR64shifted128:
5151 return Error(
5152 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5153 case Match_InvalidGPR64NoXZRshifted8:
5154 return Error(Loc, "register must be x0..x30 without shift");
5155 case Match_InvalidGPR64NoXZRshifted16:
5156 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5157 case Match_InvalidGPR64NoXZRshifted32:
5158 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5159 case Match_InvalidGPR64NoXZRshifted64:
5160 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5161 case Match_InvalidGPR64NoXZRshifted128:
5162 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
5163 case Match_InvalidZPR32UXTW8:
5164 case Match_InvalidZPR32SXTW8:
5165 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5166 case Match_InvalidZPR32UXTW16:
5167 case Match_InvalidZPR32SXTW16:
5168 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5169 case Match_InvalidZPR32UXTW32:
5170 case Match_InvalidZPR32SXTW32:
5171 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5172 case Match_InvalidZPR32UXTW64:
5173 case Match_InvalidZPR32SXTW64:
5174 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5175 case Match_InvalidZPR64UXTW8:
5176 case Match_InvalidZPR64SXTW8:
5177 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5178 case Match_InvalidZPR64UXTW16:
5179 case Match_InvalidZPR64SXTW16:
5180 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5181 case Match_InvalidZPR64UXTW32:
5182 case Match_InvalidZPR64SXTW32:
5183 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5184 case Match_InvalidZPR64UXTW64:
5185 case Match_InvalidZPR64SXTW64:
5186 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5187 case Match_InvalidZPR32LSL8:
5188 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5189 case Match_InvalidZPR32LSL16:
5190 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5191 case Match_InvalidZPR32LSL32:
5192 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5193 case Match_InvalidZPR32LSL64:
5194 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5195 case Match_InvalidZPR64LSL8:
5196 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5197 case Match_InvalidZPR64LSL16:
5198 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5199 case Match_InvalidZPR64LSL32:
5200 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5201 case Match_InvalidZPR64LSL64:
5202 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5203 case Match_InvalidZPR0:
5204 return Error(Loc, "expected register without element width suffix");
5205 case Match_InvalidZPR8:
5206 case Match_InvalidZPR16:
5207 case Match_InvalidZPR32:
5208 case Match_InvalidZPR64:
5209 case Match_InvalidZPR128:
5210 return Error(Loc, "invalid element width");
5211 case Match_InvalidZPR_3b8:
5212 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
5213 case Match_InvalidZPR_3b16:
5214 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
5215 case Match_InvalidZPR_3b32:
5216 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
5217 case Match_InvalidZPR_4b16:
5218 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
5219 case Match_InvalidZPR_4b32:
5220 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
5221 case Match_InvalidZPR_4b64:
5222 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
5223 case Match_InvalidSVEPattern:
5224 return Error(Loc, "invalid predicate pattern");
5225 case Match_InvalidSVEPredicateAnyReg:
5226 case Match_InvalidSVEPredicateBReg:
5227 case Match_InvalidSVEPredicateHReg:
5228 case Match_InvalidSVEPredicateSReg:
5229 case Match_InvalidSVEPredicateDReg:
5230 return Error(Loc, "invalid predicate register.");
5231 case Match_InvalidSVEPredicate3bAnyReg:
5232 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
5233 case Match_InvalidSVEExactFPImmOperandHalfOne:
5234 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
5235 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5236 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
5237 case Match_InvalidSVEExactFPImmOperandZeroOne:
5238 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
5239 case Match_InvalidMatrixTileVectorH8:
5240 case Match_InvalidMatrixTileVectorV8:
5241 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
5242 case Match_InvalidMatrixTileVectorH16:
5243 case Match_InvalidMatrixTileVectorV16:
5244 return Error(Loc,
5245 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
5246 case Match_InvalidMatrixTileVectorH32:
5247 case Match_InvalidMatrixTileVectorV32:
5248 return Error(Loc,
5249 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
5250 case Match_InvalidMatrixTileVectorH64:
5251 case Match_InvalidMatrixTileVectorV64:
5252 return Error(Loc,
5253 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
5254 case Match_InvalidMatrixTileVectorH128:
5255 case Match_InvalidMatrixTileVectorV128:
5256 return Error(Loc,
5257 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
5258 case Match_InvalidMatrixTile32:
5259 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
5260 case Match_InvalidMatrixTile64:
5261 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
5262 case Match_InvalidMatrix:
5263 return Error(Loc, "invalid matrix operand, expected za");
5264 case Match_InvalidMatrixIndexGPR32_12_15:
5265 return Error(Loc, "operand must be a register in range [w12, w15]");
5266 default:
5267 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5267)
;
5268 }
5269}
5270
5271static const char *getSubtargetFeatureName(uint64_t Val);
5272
5273bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
5274 OperandVector &Operands,
5275 MCStreamer &Out,
5276 uint64_t &ErrorInfo,
5277 bool MatchingInlineAsm) {
5278 assert(!Operands.empty() && "Unexpect empty operand list!")(static_cast <bool> (!Operands.empty() && "Unexpect empty operand list!"
) ? void (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5278, __extension__ __PRETTY_FUNCTION__))
;
5279 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
5280 assert(Op.isToken() && "Leading operand should always be a mnemonic!")(static_cast <bool> (Op.isToken() && "Leading operand should always be a mnemonic!"
) ? void (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5280, __extension__ __PRETTY_FUNCTION__))
;
5281
5282 StringRef Tok = Op.getToken();
5283 unsigned NumOperands = Operands.size();
5284
5285 if (NumOperands == 4 && Tok == "lsl") {
5286 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5287 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5288 if (Op2.isScalarReg() && Op3.isImm()) {
5289 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5290 if (Op3CE) {
5291 uint64_t Op3Val = Op3CE->getValue();
5292 uint64_t NewOp3Val = 0;
5293 uint64_t NewOp4Val = 0;
5294 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
5295 Op2.getReg())) {
5296 NewOp3Val = (32 - Op3Val) & 0x1f;
5297 NewOp4Val = 31 - Op3Val;
5298 } else {
5299 NewOp3Val = (64 - Op3Val) & 0x3f;
5300 NewOp4Val = 63 - Op3Val;
5301 }
5302
5303 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
5304 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
5305
5306 Operands[0] =
5307 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
5308 Operands.push_back(AArch64Operand::CreateImm(
5309 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
5310 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
5311 Op3.getEndLoc(), getContext());
5312 }
5313 }
5314 } else if (NumOperands == 4 && Tok == "bfc") {
5315 // FIXME: Horrible hack to handle BFC->BFM alias.
5316 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5317 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
5318 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
5319
5320 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
5321 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
5322 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
5323
5324 if (LSBCE && WidthCE) {
5325 uint64_t LSB = LSBCE->getValue();
5326 uint64_t Width = WidthCE->getValue();
5327
5328 uint64_t RegWidth = 0;
5329 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5330 Op1.getReg()))
5331 RegWidth = 64;
5332 else
5333 RegWidth = 32;
5334
5335 if (LSB >= RegWidth)
5336 return Error(LSBOp.getStartLoc(),
5337 "expected integer in range [0, 31]");
5338 if (Width < 1 || Width > RegWidth)
5339 return Error(WidthOp.getStartLoc(),
5340 "expected integer in range [1, 32]");
5341
5342 uint64_t ImmR = 0;
5343 if (RegWidth == 32)
5344 ImmR = (32 - LSB) & 0x1f;
5345 else
5346 ImmR = (64 - LSB) & 0x3f;
5347
5348 uint64_t ImmS = Width - 1;
5349
5350 if (ImmR != 0 && ImmS >= ImmR)
5351 return Error(WidthOp.getStartLoc(),
5352 "requested insert overflows register");
5353
5354 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
5355 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
5356 Operands[0] =
5357 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
5358 Operands[2] = AArch64Operand::CreateReg(
5359 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
5360 SMLoc(), SMLoc(), getContext());
5361 Operands[3] = AArch64Operand::CreateImm(
5362 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
5363 Operands.emplace_back(
5364 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
5365 WidthOp.getEndLoc(), getContext()));
5366 }
5367 }
5368 } else if (NumOperands == 5) {
5369 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
5370 // UBFIZ -> UBFM aliases.
5371 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
5372 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5373 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5374 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5375
5376 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5377 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5378 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5379
5380 if (Op3CE && Op4CE) {
5381 uint64_t Op3Val = Op3CE->getValue();
5382 uint64_t Op4Val = Op4CE->getValue();
5383
5384 uint64_t RegWidth = 0;
5385 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5386 Op1.getReg()))
5387 RegWidth = 64;
5388 else
5389 RegWidth = 32;
5390
5391 if (Op3Val >= RegWidth)
5392 return Error(Op3.getStartLoc(),
5393 "expected integer in range [0, 31]");
5394 if (Op4Val < 1 || Op4Val > RegWidth)
5395 return Error(Op4.getStartLoc(),
5396 "expected integer in range [1, 32]");
5397
5398 uint64_t NewOp3Val = 0;
5399 if (RegWidth == 32)
5400 NewOp3Val = (32 - Op3Val) & 0x1f;
5401 else
5402 NewOp3Val = (64 - Op3Val) & 0x3f;
5403
5404 uint64_t NewOp4Val = Op4Val - 1;
5405
5406 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
5407 return Error(Op4.getStartLoc(),
5408 "requested insert overflows register");
5409
5410 const MCExpr *NewOp3 =
5411 MCConstantExpr::create(NewOp3Val, getContext());
5412 const MCExpr *NewOp4 =
5413 MCConstantExpr::create(NewOp4Val, getContext());
5414 Operands[3] = AArch64Operand::CreateImm(
5415 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
5416 Operands[4] = AArch64Operand::CreateImm(
5417 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5418 if (Tok == "bfi")
5419 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5420 getContext());
5421 else if (Tok == "sbfiz")
5422 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5423 getContext());
5424 else if (Tok == "ubfiz")
5425 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5426 getContext());
5427 else
5428 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5428)
;
5429 }
5430 }
5431
5432 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
5433 // UBFX -> UBFM aliases.
5434 } else if (NumOperands == 5 &&
5435 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
5436 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5437 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5438 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5439
5440 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5441 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5442 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5443
5444 if (Op3CE && Op4CE) {
5445 uint64_t Op3Val = Op3CE->getValue();
5446 uint64_t Op4Val = Op4CE->getValue();
5447
5448 uint64_t RegWidth = 0;
5449 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5450 Op1.getReg()))
5451 RegWidth = 64;
5452 else
5453 RegWidth = 32;
5454
5455 if (Op3Val >= RegWidth)
5456 return Error(Op3.getStartLoc(),
5457 "expected integer in range [0, 31]");
5458 if (Op4Val < 1 || Op4Val > RegWidth)
5459 return Error(Op4.getStartLoc(),
5460 "expected integer in range [1, 32]");
5461
5462 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
5463
5464 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
5465 return Error(Op4.getStartLoc(),
5466 "requested extract overflows register");
5467
5468 const MCExpr *NewOp4 =
5469 MCConstantExpr::create(NewOp4Val, getContext());
5470 Operands[4] = AArch64Operand::CreateImm(
5471 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5472 if (Tok == "bfxil")
5473 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5474 getContext());
5475 else if (Tok == "sbfx")
5476 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5477 getContext());
5478 else if (Tok == "ubfx")
5479 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5480 getContext());
5481 else
5482 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5482)
;
5483 }
5484 }
5485 }
5486 }
5487
5488 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
5489 // instruction for FP registers correctly in some rare circumstances. Convert
5490 // it to a safe instruction and warn (because silently changing someone's
5491 // assembly is rude).
5492 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
5493 NumOperands == 4 && Tok == "movi") {
5494 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5495 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5496 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5497 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
5498 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
5499 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
5500 if (Suffix.lower() == ".2d" &&
5501 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
5502 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
5503 " correctly on this CPU, converting to equivalent movi.16b");
5504 // Switch the suffix to .16b.
5505 unsigned Idx = Op1.isToken() ? 1 : 2;
5506 Operands[Idx] =
5507 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
5508 }
5509 }
5510 }
5511
5512 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
5513 // InstAlias can't quite handle this since the reg classes aren't
5514 // subclasses.
5515 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
5516 // The source register can be Wn here, but the matcher expects a
5517 // GPR64. Twiddle it here if necessary.
5518 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5519 if (Op.isScalarReg()) {
5520 unsigned Reg = getXRegFromWReg(Op.getReg());
5521 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5522 Op.getStartLoc(), Op.getEndLoc(),
5523 getContext());
5524 }
5525 }
5526 // FIXME: Likewise for sxt[bh] with a Xd dst operand
5527 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5528 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5529 if (Op.isScalarReg() &&
5530 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5531 Op.getReg())) {
5532 // The source register can be Wn here, but the matcher expects a
5533 // GPR64. Twiddle it here if necessary.
5534 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5535 if (Op.isScalarReg()) {
5536 unsigned Reg = getXRegFromWReg(Op.getReg());
5537 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5538 Op.getStartLoc(),
5539 Op.getEndLoc(), getContext());
5540 }
5541 }
5542 }
5543 // FIXME: Likewise for uxt[bh] with a Xd dst operand
5544 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5545 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5546 if (Op.isScalarReg() &&
5547 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5548 Op.getReg())) {
5549 // The source register can be Wn here, but the matcher expects a
5550 // GPR32. Twiddle it here if necessary.
5551 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5552 if (Op.isScalarReg()) {
5553 unsigned Reg = getWRegFromXReg(Op.getReg());
5554 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5555 Op.getStartLoc(),
5556 Op.getEndLoc(), getContext());
5557 }
5558 }
5559 }
5560
5561 MCInst Inst;
5562 FeatureBitset MissingFeatures;
5563 // First try to match against the secondary set of tables containing the
5564 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5565 unsigned MatchResult =
5566 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5567 MatchingInlineAsm, 1);
5568
5569 // If that fails, try against the alternate table containing long-form NEON:
5570 // "fadd v0.2s, v1.2s, v2.2s"
5571 if (MatchResult != Match_Success) {
5572 // But first, save the short-form match result: we can use it in case the
5573 // long-form match also fails.
5574 auto ShortFormNEONErrorInfo = ErrorInfo;
5575 auto ShortFormNEONMatchResult = MatchResult;
5576 auto ShortFormNEONMissingFeatures = MissingFeatures;
5577
5578 MatchResult =
5579 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5580 MatchingInlineAsm, 0);
5581
5582 // Now, both matches failed, and the long-form match failed on the mnemonic
5583 // suffix token operand. The short-form match failure is probably more
5584 // relevant: use it instead.
5585 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5586 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5587 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5588 MatchResult = ShortFormNEONMatchResult;
5589 ErrorInfo = ShortFormNEONErrorInfo;
5590 MissingFeatures = ShortFormNEONMissingFeatures;
5591 }
5592 }
5593
5594 switch (MatchResult) {
5595 case Match_Success: {
5596 // Perform range checking and other semantic validations
5597 SmallVector<SMLoc, 8> OperandLocs;
5598 NumOperands = Operands.size();
5599 for (unsigned i = 1; i < NumOperands; ++i)
5600 OperandLocs.push_back(Operands[i]->getStartLoc());
5601 if (validateInstruction(Inst, IDLoc, OperandLocs))
5602 return true;
5603
5604 Inst.setLoc(IDLoc);
5605 Out.emitInstruction(Inst, getSTI());
5606 return false;
5607 }
5608 case Match_MissingFeature: {
5609 assert(MissingFeatures.any() && "Unknown missing feature!")(static_cast <bool> (MissingFeatures.any() && "Unknown missing feature!"
) ? void (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5609, __extension__ __PRETTY_FUNCTION__))
;
5610 // Special case the error message for the very common case where only
5611 // a single subtarget feature is missing (neon, e.g.).
5612 std::string Msg = "instruction requires:";
5613 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5614 if (MissingFeatures[i]) {
5615 Msg += " ";
5616 Msg += getSubtargetFeatureName(i);
5617 }
5618 }
5619 return Error(IDLoc, Msg);
5620 }
5621 case Match_MnemonicFail:
5622 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5623 case Match_InvalidOperand: {
5624 SMLoc ErrorLoc = IDLoc;
5625
5626 if (ErrorInfo != ~0ULL) {
5627 if (ErrorInfo >= Operands.size())
5628 return Error(IDLoc, "too few operands for instruction",
5629 SMRange(IDLoc, getTok().getLoc()));
5630
5631 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5632 if (ErrorLoc == SMLoc())
5633 ErrorLoc = IDLoc;
5634 }
5635 // If the match failed on a suffix token operand, tweak the diagnostic
5636 // accordingly.
5637 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5638 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5639 MatchResult = Match_InvalidSuffix;
5640
5641 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5642 }
5643 case Match_InvalidTiedOperand:
5644 case Match_InvalidMemoryIndexed1:
5645 case Match_InvalidMemoryIndexed2:
5646 case Match_InvalidMemoryIndexed4:
5647 case Match_InvalidMemoryIndexed8:
5648 case Match_InvalidMemoryIndexed16:
5649 case Match_InvalidCondCode:
5650 case Match_AddSubRegExtendSmall:
5651 case Match_AddSubRegExtendLarge:
5652 case Match_AddSubSecondSource:
5653 case Match_LogicalSecondSource:
5654 case Match_AddSubRegShift32:
5655 case Match_AddSubRegShift64:
5656 case Match_InvalidMovImm32Shift:
5657 case Match_InvalidMovImm64Shift:
5658 case Match_InvalidFPImm:
5659 case Match_InvalidMemoryWExtend8:
5660 case Match_InvalidMemoryWExtend16:
5661 case Match_InvalidMemoryWExtend32:
5662 case Match_InvalidMemoryWExtend64:
5663 case Match_InvalidMemoryWExtend128:
5664 case Match_InvalidMemoryXExtend8:
5665 case Match_InvalidMemoryXExtend16:
5666 case Match_InvalidMemoryXExtend32:
5667 case Match_InvalidMemoryXExtend64:
5668 case Match_InvalidMemoryXExtend128:
5669 case Match_InvalidMemoryIndexed1SImm4:
5670 case Match_InvalidMemoryIndexed2SImm4:
5671 case Match_InvalidMemoryIndexed3SImm4:
5672 case Match_InvalidMemoryIndexed4SImm4:
5673 case Match_InvalidMemoryIndexed1SImm6:
5674 case Match_InvalidMemoryIndexed16SImm4:
5675 case Match_InvalidMemoryIndexed32SImm4:
5676 case Match_InvalidMemoryIndexed4SImm7:
5677 case Match_InvalidMemoryIndexed8SImm7:
5678 case Match_InvalidMemoryIndexed16SImm7:
5679 case Match_InvalidMemoryIndexed8UImm5:
5680 case Match_InvalidMemoryIndexed4UImm5:
5681 case Match_InvalidMemoryIndexed2UImm5:
5682 case Match_InvalidMemoryIndexed1UImm6:
5683 case Match_InvalidMemoryIndexed2UImm6:
5684 case Match_InvalidMemoryIndexed4UImm6:
5685 case Match_InvalidMemoryIndexed8UImm6:
5686 case Match_InvalidMemoryIndexed16UImm6:
5687 case Match_InvalidMemoryIndexedSImm6:
5688 case Match_InvalidMemoryIndexedSImm5:
5689 case Match_InvalidMemoryIndexedSImm8:
5690 case Match_InvalidMemoryIndexedSImm9:
5691 case Match_InvalidMemoryIndexed16SImm9:
5692 case Match_InvalidMemoryIndexed8SImm10:
5693 case Match_InvalidImm0_1:
5694 case Match_InvalidImm0_3:
5695 case Match_InvalidImm0_7:
5696 case Match_InvalidImm0_15:
5697 case Match_InvalidImm0_31:
5698 case Match_InvalidImm0_63:
5699 case Match_InvalidImm0_127:
5700 case Match_InvalidImm0_255:
5701 case Match_InvalidImm0_65535:
5702 case Match_InvalidImm1_8:
5703 case Match_InvalidImm1_16:
5704 case Match_InvalidImm1_32:
5705 case Match_InvalidImm1_64:
5706 case Match_InvalidSVEAddSubImm8:
5707 case Match_InvalidSVEAddSubImm16:
5708 case Match_InvalidSVEAddSubImm32:
5709 case Match_InvalidSVEAddSubImm64:
5710 case Match_InvalidSVECpyImm8:
5711 case Match_InvalidSVECpyImm16:
5712 case Match_InvalidSVECpyImm32:
5713 case Match_InvalidSVECpyImm64:
5714 case Match_InvalidIndexRange1_1:
5715 case Match_InvalidIndexRange0_15:
5716 case Match_InvalidIndexRange0_7:
5717 case Match_InvalidIndexRange0_3:
5718 case Match_InvalidIndexRange0_1:
5719 case Match_InvalidSVEIndexRange0_63:
5720 case Match_InvalidSVEIndexRange0_31:
5721 case Match_InvalidSVEIndexRange0_15:
5722 case Match_InvalidSVEIndexRange0_7:
5723 case Match_InvalidSVEIndexRange0_3:
5724 case Match_InvalidLabel:
5725 case Match_InvalidComplexRotationEven:
5726 case Match_InvalidComplexRotationOdd:
5727 case Match_InvalidGPR64shifted8:
5728 case Match_InvalidGPR64shifted16:
5729 case Match_InvalidGPR64shifted32:
5730 case Match_InvalidGPR64shifted64:
5731 case Match_InvalidGPR64shifted128:
5732 case Match_InvalidGPR64NoXZRshifted8:
5733 case Match_InvalidGPR64NoXZRshifted16:
5734 case Match_InvalidGPR64NoXZRshifted32:
5735 case Match_InvalidGPR64NoXZRshifted64:
5736 case Match_InvalidGPR64NoXZRshifted128:
5737 case Match_InvalidZPR32UXTW8:
5738 case Match_InvalidZPR32UXTW16:
5739 case Match_InvalidZPR32UXTW32:
5740 case Match_InvalidZPR32UXTW64:
5741 case Match_InvalidZPR32SXTW8:
5742 case Match_InvalidZPR32SXTW16:
5743 case Match_InvalidZPR32SXTW32:
5744 case Match_InvalidZPR32SXTW64:
5745 case Match_InvalidZPR64UXTW8:
5746 case Match_InvalidZPR64SXTW8:
5747 case Match_InvalidZPR64UXTW16:
5748 case Match_InvalidZPR64SXTW16:
5749 case Match_InvalidZPR64UXTW32:
5750 case Match_InvalidZPR64SXTW32:
5751 case Match_InvalidZPR64UXTW64:
5752 case Match_InvalidZPR64SXTW64:
5753 case Match_InvalidZPR32LSL8:
5754 case Match_InvalidZPR32LSL16:
5755 case Match_InvalidZPR32LSL32:
5756 case Match_InvalidZPR32LSL64:
5757 case Match_InvalidZPR64LSL8:
5758 case Match_InvalidZPR64LSL16:
5759 case Match_InvalidZPR64LSL32:
5760 case Match_InvalidZPR64LSL64:
5761 case Match_InvalidZPR0:
5762 case Match_InvalidZPR8:
5763 case Match_InvalidZPR16:
5764 case Match_InvalidZPR32:
5765 case Match_InvalidZPR64:
5766 case Match_InvalidZPR128:
5767 case Match_InvalidZPR_3b8:
5768 case Match_InvalidZPR_3b16:
5769 case Match_InvalidZPR_3b32:
5770 case Match_InvalidZPR_4b16:
5771 case Match_InvalidZPR_4b32:
5772 case Match_InvalidZPR_4b64:
5773 case Match_InvalidSVEPredicateAnyReg:
5774 case Match_InvalidSVEPattern:
5775 case Match_InvalidSVEPredicateBReg:
5776 case Match_InvalidSVEPredicateHReg:
5777 case Match_InvalidSVEPredicateSReg:
5778 case Match_InvalidSVEPredicateDReg:
5779 case Match_InvalidSVEPredicate3bAnyReg:
5780 case Match_InvalidSVEExactFPImmOperandHalfOne:
5781 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5782 case Match_InvalidSVEExactFPImmOperandZeroOne:
5783 case Match_InvalidMatrixTile32:
5784 case Match_InvalidMatrixTile64:
5785 case Match_InvalidMatrix:
5786 case Match_InvalidMatrixTileVectorH8:
5787 case Match_InvalidMatrixTileVectorH16:
5788 case Match_InvalidMatrixTileVectorH32:
5789 case Match_InvalidMatrixTileVectorH64:
5790 case Match_InvalidMatrixTileVectorH128:
5791 case Match_InvalidMatrixTileVectorV8:
5792 case Match_InvalidMatrixTileVectorV16:
5793 case Match_InvalidMatrixTileVectorV32:
5794 case Match_InvalidMatrixTileVectorV64:
5795 case Match_InvalidMatrixTileVectorV128:
5796 case Match_InvalidSVCR:
5797 case Match_InvalidMatrixIndexGPR32_12_15:
5798 case Match_MSR:
5799 case Match_MRS: {
5800 if (ErrorInfo >= Operands.size())
5801 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5802 // Any time we get here, there's nothing fancy to do. Just get the
5803 // operand SMLoc and display the diagnostic.
5804 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5805 if (ErrorLoc == SMLoc())
5806 ErrorLoc = IDLoc;
5807 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5808 }
5809 }
5810
5811 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5811)
;
5812}
5813
5814/// ParseDirective parses the arm specific directives
5815bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5816 const MCContext::Environment Format = getContext().getObjectFileType();
5817 bool IsMachO = Format == MCContext::IsMachO;
5818 bool IsCOFF = Format == MCContext::IsCOFF;
5819
5820 auto IDVal = DirectiveID.getIdentifier().lower();
5821 SMLoc Loc = DirectiveID.getLoc();
5822 if (IDVal == ".arch")
5823 parseDirectiveArch(Loc);
5824 else if (IDVal == ".cpu")
5825 parseDirectiveCPU(Loc);
5826 else if (IDVal == ".tlsdesccall")
5827 parseDirectiveTLSDescCall(Loc);
5828 else if (IDVal == ".ltorg" || IDVal == ".pool")
5829 parseDirectiveLtorg(Loc);
5830 else if (IDVal == ".unreq")
5831 parseDirectiveUnreq(Loc);
5832 else if (IDVal == ".inst")
5833 parseDirectiveInst(Loc);
5834 else if (IDVal == ".cfi_negate_ra_state")
5835 parseDirectiveCFINegateRAState();
5836 else if (IDVal == ".cfi_b_key_frame")
5837 parseDirectiveCFIBKeyFrame();
5838 else if (IDVal == ".arch_extension")
5839 parseDirectiveArchExtension(Loc);
5840 else if (IDVal == ".variant_pcs")
5841 parseDirectiveVariantPCS(Loc);
5842 else if (IsMachO) {
5843 if (IDVal == MCLOHDirectiveName())
5844 parseDirectiveLOH(IDVal, Loc);
5845 else
5846 return true;
5847 } else if (IsCOFF) {
5848 if (IDVal == ".seh_stackalloc")
5849 parseDirectiveSEHAllocStack(Loc);
5850 else if (IDVal == ".seh_endprologue")
5851 parseDirectiveSEHPrologEnd(Loc);
5852 else if (IDVal == ".seh_save_r19r20_x")
5853 parseDirectiveSEHSaveR19R20X(Loc);
5854 else if (IDVal == ".seh_save_fplr")
5855 parseDirectiveSEHSaveFPLR(Loc);
5856 else if (IDVal == ".seh_save_fplr_x")
5857 parseDirectiveSEHSaveFPLRX(Loc);
5858 else if (IDVal == ".seh_save_reg")
5859 parseDirectiveSEHSaveReg(Loc);
5860 else if (IDVal == ".seh_save_reg_x")
5861 parseDirectiveSEHSaveRegX(Loc);
5862 else if (IDVal == ".seh_save_regp")
5863 parseDirectiveSEHSaveRegP(Loc);
5864 else if (IDVal == ".seh_save_regp_x")
5865 parseDirectiveSEHSaveRegPX(Loc);
5866 else if (IDVal == ".seh_save_lrpair")
5867 parseDirectiveSEHSaveLRPair(Loc);
5868 else if (IDVal == ".seh_save_freg")
5869 parseDirectiveSEHSaveFReg(Loc);
5870 else if (IDVal == ".seh_save_freg_x")
5871 parseDirectiveSEHSaveFRegX(Loc);
5872 else if (IDVal == ".seh_save_fregp")
5873 parseDirectiveSEHSaveFRegP(Loc);
5874 else if (IDVal == ".seh_save_fregp_x")
5875 parseDirectiveSEHSaveFRegPX(Loc);
5876 else if (IDVal == ".seh_set_fp")
5877 parseDirectiveSEHSetFP(Loc);
5878 else if (IDVal == ".seh_add_fp")
5879 parseDirectiveSEHAddFP(Loc);
5880 else if (IDVal == ".seh_nop")
5881 parseDirectiveSEHNop(Loc);
5882 else if (IDVal == ".seh_save_next")
5883 parseDirectiveSEHSaveNext(Loc);
5884 else if (IDVal == ".seh_startepilogue")
5885 parseDirectiveSEHEpilogStart(Loc);
5886 else if (IDVal == ".seh_endepilogue")
5887 parseDirectiveSEHEpilogEnd(Loc);
5888 else if (IDVal == ".seh_trap_frame")
5889 parseDirectiveSEHTrapFrame(Loc);
5890 else if (IDVal == ".seh_pushframe")
5891 parseDirectiveSEHMachineFrame(Loc);
5892 else if (IDVal == ".seh_context")
5893 parseDirectiveSEHContext(Loc);
5894 else if (IDVal == ".seh_clear_unwound_to_call")
5895 parseDirectiveSEHClearUnwoundToCall(Loc);
5896 else
5897 return true;
5898 } else
5899 return true;
5900 return false;
5901}
5902
// Expand the pseudo-extension "crypto"/"nocrypto" into the concrete
// crypto sub-extensions appropriate for the given architecture: pre-v8.4
// "crypto" traditionally meant sha2+aes, while v8.4 and later also pull
// in sm4+sha3.  The expansions are appended to RequestedExtensions.
static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
                            SmallVector<StringRef, 4> &RequestedExtensions) {
  const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
  const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");

  // If both "crypto" and "nocrypto" were requested, "nocrypto" wins
  // (the first branch requires !NoCrypto).
  if (!NoCrypto && Crypto) {
    switch (ArchKind) {
    default:
      // Map 'generic' (and others) to sha2 and aes, because
      // that was the traditional meaning of crypto.
    case AArch64::ArchKind::ARMV8_1A:
    case AArch64::ArchKind::ARMV8_2A:
    case AArch64::ArchKind::ARMV8_3A:
      RequestedExtensions.push_back("sha2");
      RequestedExtensions.push_back("aes");
      break;
    case AArch64::ArchKind::ARMV8_4A:
    case AArch64::ArchKind::ARMV8_5A:
    case AArch64::ArchKind::ARMV8_6A:
    case AArch64::ArchKind::ARMV8_7A:
    case AArch64::ArchKind::ARMV8R:
      RequestedExtensions.push_back("sm4");
      RequestedExtensions.push_back("sha3");
      RequestedExtensions.push_back("sha2");
      RequestedExtensions.push_back("aes");
      break;
    }
  } else if (NoCrypto) {
    switch (ArchKind) {
    default:
      // Map 'generic' (and others) to sha2 and aes, because
      // that was the traditional meaning of crypto.
    case AArch64::ArchKind::ARMV8_1A:
    case AArch64::ArchKind::ARMV8_2A:
    case AArch64::ArchKind::ARMV8_3A:
      RequestedExtensions.push_back("nosha2");
      RequestedExtensions.push_back("noaes");
      break;
    // NOTE(review): ARMV8R is listed in the "crypto" switch above but not
    // here, so "nocrypto" on v8-R falls into the default (sha2/aes-only)
    // case — confirm whether that asymmetry is intentional.
    case AArch64::ArchKind::ARMV8_4A:
    case AArch64::ArchKind::ARMV8_5A:
    case AArch64::ArchKind::ARMV8_6A:
    case AArch64::ArchKind::ARMV8_7A:
      RequestedExtensions.push_back("nosm4");
      RequestedExtensions.push_back("nosha3");
      RequestedExtensions.push_back("nosha2");
      RequestedExtensions.push_back("noaes");
      break;
    }
  }
}
5953
5954/// parseDirectiveArch
5955/// ::= .arch token
5956bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5957 SMLoc ArchLoc = getLoc();
5958
5959 StringRef Arch, ExtensionString;
5960 std::tie(Arch, ExtensionString) =
5961 getParser().parseStringToEndOfStatement().trim().split('+');
5962
5963 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5964 if (ID == AArch64::ArchKind::INVALID)
5965 return Error(ArchLoc, "unknown arch name");
5966
5967 if (parseToken(AsmToken::EndOfStatement))
5968 return true;
5969
5970 // Get the architecture and extension features.
5971 std::vector<StringRef> AArch64Features;
5972 AArch64::getArchFeatures(ID, AArch64Features);
5973 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5974 AArch64Features);
5975
5976 MCSubtargetInfo &STI = copySTI();
5977 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5978 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
5979 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5980
5981 SmallVector<StringRef, 4> RequestedExtensions;
5982 if (!ExtensionString.empty())
5983 ExtensionString.split(RequestedExtensions, '+');
5984
5985 ExpandCryptoAEK(ID, RequestedExtensions);
5986
5987 FeatureBitset Features = STI.getFeatureBits();
5988 for (auto Name : RequestedExtensions) {
5989 bool EnableFeature = true;
5990
5991 if (Name.startswith_insensitive("no")) {
5992 EnableFeature = false;
5993 Name = Name.substr(2);
5994 }
5995
5996 for (const auto &Extension : ExtensionMap) {
5997 if (Extension.Name != Name)
5998 continue;
5999
6000 if (Extension.Features.none())
6001 report_fatal_error("unsupported architectural extension: " + Name);
6002
6003 FeatureBitset ToggleFeatures = EnableFeature
6004 ? (~Features & Extension.Features)
6005 : ( Features & Extension.Features);
6006 FeatureBitset Features =
6007 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6008 setAvailableFeatures(Features);
6009 break;
6010 }
6011 }
6012 return false;
6013}
6014
6015/// parseDirectiveArchExtension
6016/// ::= .arch_extension [no]feature
6017bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
6018 SMLoc ExtLoc = getLoc();
6019
6020 StringRef Name = getParser().parseStringToEndOfStatement().trim();
6021
6022 if (parseToken(AsmToken::EndOfStatement,
6023 "unexpected token in '.arch_extension' directive"))
6024 return true;
6025
6026 bool EnableFeature = true;
6027 if (Name.startswith_insensitive("no")) {
6028 EnableFeature = false;
6029 Name = Name.substr(2);
6030 }
6031
6032 MCSubtargetInfo &STI = copySTI();
6033 FeatureBitset Features = STI.getFeatureBits();
6034 for (const auto &Extension : ExtensionMap) {
6035 if (Extension.Name != Name)
6036 continue;
6037
6038 if (Extension.Features.none())
6039 return Error(ExtLoc, "unsupported architectural extension: " + Name);
6040
6041 FeatureBitset ToggleFeatures = EnableFeature
6042 ? (~Features & Extension.Features)
6043 : (Features & Extension.Features);
6044 FeatureBitset Features =
6045 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6046 setAvailableFeatures(Features);
6047 return false;
6048 }
6049
6050 return Error(ExtLoc, "unknown architectural extension: " + Name);
6051}
6052
// Return a source location Offset bytes past L (used to step through the
// comma/plus-separated operand string while reporting errors).
static SMLoc incrementLoc(SMLoc L, int Offset) {
  return SMLoc::getFromPointer(L.getPointer() + Offset);
}
6056
6057/// parseDirectiveCPU
6058/// ::= .cpu id
6059bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
6060 SMLoc CurLoc = getLoc();
6061
6062 StringRef CPU, ExtensionString;
6063 std::tie(CPU, ExtensionString) =
6064 getParser().parseStringToEndOfStatement().trim().split('+');
6065
6066 if (parseToken(AsmToken::EndOfStatement))
6067 return true;
6068
6069 SmallVector<StringRef, 4> RequestedExtensions;
6070 if (!ExtensionString.empty())
6071 ExtensionString.split(RequestedExtensions, '+');
6072
6073 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
6074 // once that is tablegen'ed
6075 if (!getSTI().isCPUStringValid(CPU)) {
6076 Error(CurLoc, "unknown CPU name");
6077 return false;
6078 }
6079
6080 MCSubtargetInfo &STI = copySTI();
6081 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
6082 CurLoc = incrementLoc(CurLoc, CPU.size());
6083
6084 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
6085
6086 FeatureBitset Features = STI.getFeatureBits();
6087 for (auto Name : RequestedExtensions) {
6088 // Advance source location past '+'.
6089 CurLoc = incrementLoc(CurLoc, 1);
6090
6091 bool EnableFeature = true;
6092
6093 if (Name.startswith_insensitive("no")) {
6094 EnableFeature = false;
6095 Name = Name.substr(2);
6096 }
6097
6098 bool FoundExtension = false;
6099 for (const auto &Extension : ExtensionMap) {
6100 if (Extension.Name != Name)
6101 continue;
6102
6103 if (Extension.Features.none())
6104 report_fatal_error("unsupported architectural extension: " + Name);
6105
6106 FeatureBitset ToggleFeatures = EnableFeature
6107 ? (~Features & Extension.Features)
6108 : ( Features & Extension.Features);
6109 FeatureBitset Features =
6110 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6111 setAvailableFeatures(Features);
6112 FoundExtension = true;
6113
6114 break;
6115 }
6116
6117 if (!FoundExtension)
6118 Error(CurLoc, "unsupported architectural extension");
6119
6120 CurLoc = incrementLoc(CurLoc, Name.size());
6121 }
6122 return false;
6123}
6124
/// parseDirectiveInst
/// ::= .inst opcode [, ...]
///
/// Emits each comma-separated constant expression as a raw instruction
/// word via the target streamer.  At least one operand is required.
bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
  if (getLexer().is(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following '.inst' directive");

  // Parse and emit a single operand; returns true on error.
  auto parseOp = [&]() -> bool {
    SMLoc L = getLoc();
    const MCExpr *Expr = nullptr;
    if (check(getParser().parseExpression(Expr), L, "expected expression"))
      return true;
    // Only fully-evaluated constants can be encoded as raw words.
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (check(!Value, L, "expected constant expression"))
      return true;
    getTargetStreamer().emitInst(Value->getValue());
    return false;
  };

  // Parse a comma-separated list of operands up to end of statement.
  return parseMany(parseOp);
}
6145
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
//
// Emits a TLSDESCCALL pseudo-instruction referencing the given symbol with
// a VK_TLSDESC variant, marking the call site for TLS descriptor relaxation.
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (check(getParser().parseIdentifier(Name), L,
            "expected symbol after directive") ||
      parseToken(AsmToken::EndOfStatement))
    return true;

  // Wrap the symbol reference in the TLSDESC variant kind.
  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::createExpr(Expr));

  getParser().getStreamer().emitInstruction(Inst, getSTI());
  return false;
}
6166
6167/// ::= .loh <lohName | lohId> label1, ..., labelN
6168/// The number of arguments depends on the loh identifier.
6169bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
6170 MCLOHType Kind;
6171 if (getTok().isNot(AsmToken::Identifier)) {
6172 if (getTok().isNot(AsmToken::Integer))
6173 return TokError("expected an identifier or a number in directive");
6174 // We successfully get a numeric value for the identifier.
6175 // Check if it is valid.
6176 int64_t Id = getTok().getIntVal();
6177 if (Id <= -1U && !isValidMCLOHType(Id))
6178 return TokError("invalid numeric identifier in directive");
6179 Kind = (MCLOHType)Id;
6180 } else {
6181 StringRef Name = getTok().getIdentifier();
6182 // We successfully parse an identifier.
6183 // Check if it is a recognized one.
6184 int Id = MCLOHNameToId(Name);
6185
6186 if (Id == -1)
6187 return TokError("invalid identifier in directive");
6188 Kind = (MCLOHType)Id;
6189 }
6190 // Consume the identifier.
6191 Lex();
6192 // Get the number of arguments of this LOH.
6193 int NbArgs = MCLOHIdToNbArgs(Kind);
6194
6195 assert(NbArgs != -1 && "Invalid number of arguments")(static_cast <bool> (NbArgs != -1 && "Invalid number of arguments"
) ? void (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 6195, __extension__ __PRETTY_FUNCTION__))
;
6196
6197 SmallVector<MCSymbol *, 3> Args;
6198 for (int Idx = 0; Idx < NbArgs; ++Idx) {
6199 StringRef Name;
6200 if (getParser().parseIdentifier(Name))
6201 return TokError("expected identifier in directive");
6202 Args.push_back(getContext().getOrCreateSymbol(Name));
6203
6204 if (Idx + 1 == NbArgs)
6205 break;
6206 if (parseToken(AsmToken::Comma,
6207 "unexpected token in '" + Twine(IDVal) + "' directive"))
6208 return true;
6209 }
6210 if (parseToken(AsmToken::EndOfStatement,
6211 "unexpected token in '" + Twine(IDVal) + "' directive"))
6212 return true;
6213
6214 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
6215 return false;
6216}
6217
/// parseDirectiveLtorg
/// ::= .ltorg | .pool
///
/// Dumps the current literal/constant pool at this point in the stream.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
6226
/// parseDirectiveReq
/// ::= name .req registername
///
/// Defines a register alias.  The register is tried as, in order: a scalar
/// GPR, a NEON vector, an SVE data vector, then an SVE predicate — the
/// order matters because each parse attempt consumes nothing on NoMatch.
/// A redefinition with a different target only warns (first wins).
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  RegKind RegisterKind = RegKind::Scalar;
  unsigned RegNum;
  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);

  // Not a scalar register: try a NEON vector register (no type suffix
  // such as ".4s" is permitted on an alias definition).
  if (ParseRes != MatchOperand_Success) {
    StringRef Kind;
    RegisterKind = RegKind::NeonVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);

    if (ParseRes == MatchOperand_ParseFail)
      return true;

    if (ParseRes == MatchOperand_Success && !Kind.empty())
      return Error(SRegLoc, "vector register without type specifier expected");
  }

  // Next, try an SVE data vector register (again, no type suffix allowed).
  if (ParseRes != MatchOperand_Success) {
    StringRef Kind;
    RegisterKind = RegKind::SVEDataVector;
    ParseRes =
        tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

    if (ParseRes == MatchOperand_ParseFail)
      return true;

    if (ParseRes == MatchOperand_Success && !Kind.empty())
      return Error(SRegLoc,
                   "sve vector register without type specifier expected");
  }

  // Finally, try an SVE predicate register.
  if (ParseRes != MatchOperand_Success) {
    StringRef Kind;
    RegisterKind = RegKind::SVEPredicateVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);

    if (ParseRes == MatchOperand_ParseFail)
      return true;

    if (ParseRes == MatchOperand_Success && !Kind.empty())
      return Error(SRegLoc,
                   "sve predicate register without type specifier expected");
  }

  if (ParseRes != MatchOperand_Success)
    return Error(SRegLoc, "register name or alias expected");

  // Shouldn't be anything else.
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected input in .req directive"))
    return true;

  // Record the alias; insert() keeps the first definition, so a conflicting
  // redefinition is detected by comparing the stored pair against ours.
  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");

  return false;
}
6289
/// parseDirectiveUneq
/// ::= .unreq registername
///
/// Removes a register alias created by .req; unknown names are silently
/// ignored (erase on a missing key is a no-op).
bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
  if (getTok().isNot(AsmToken::Identifier))
    return TokError("unexpected input in .unreq directive.");
  // Aliases are stored lower-cased, so normalize before erasing.
  RegisterReqs.erase(getTok().getIdentifier().lower());
  Lex(); // Eat the identifier.
  return parseToken(AsmToken::EndOfStatement);
}
6299
// Handles ".cfi_negate_ra_state": records a toggle of the return-address
// signing state in the current CFI frame.
bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  getStreamer().emitCFINegateRAState();
  return false;
}
6306
/// parseDirectiveCFIBKeyFrame
/// ::= .cfi_b_key
///
/// Marks the current frame as signed with the B key (pointer auth).
bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.cfi_b_key_frame'"))
    return true;
  getStreamer().emitCFIBKeyFrame();
  return false;
}
6316
/// parseDirectiveVariantPCS
/// ::= .variant_pcs symbolname
///
/// Marks an already-declared symbol as using a variant procedure-call
/// standard (e.g. SVE vector PCS).  The symbol must exist at this point.
bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
  const AsmToken &Tok = getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef SymbolName = Tok.getIdentifier();

  // lookupSymbol (not getOrCreateSymbol): referencing an undeclared symbol
  // here is an error rather than an implicit declaration.
  MCSymbol *Sym = getContext().lookupSymbol(SymbolName);
  if (!Sym)
    return TokError("unknown symbol");

  Lex(); // Eat the symbol

  if (parseEOL())
    return true;
  getTargetStreamer().emitDirectiveVariantPCS(Sym);
  return false;
}
6337
/// parseDirectiveSEHAllocStack
/// ::= .seh_stackalloc
///
/// Records a stack allocation of the given byte size in the Win64 unwind info.
bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
  int64_t Size;
  if (parseImmExpr(Size))
    return true;
  getTargetStreamer().emitARM64WinCFIAllocStack(Size);
  return false;
}
6347
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
///
/// Marks the end of the prologue in the Win64 unwind info; takes no operands.
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
6354
/// parseDirectiveSEHSaveR19R20X
/// ::= .seh_save_r19r20_x
///
/// Records a pre-indexed save of x19/x20 at the given offset.
bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
  int64_t Offset;
  if (parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
  return false;
}
6364
/// parseDirectiveSEHSaveFPLR
/// ::= .seh_save_fplr
///
/// Records a save of the fp/lr pair at the given stack offset.
bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
  int64_t Offset;
  if (parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
  return false;
}
6374
/// parseDirectiveSEHSaveFPLRX
/// ::= .seh_save_fplr_x
///
/// Records a pre-indexed save of the fp/lr pair at the given offset.
bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
  int64_t Offset;
  if (parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
  return false;
}
6384
/// parseDirectiveSEHSaveReg
/// ::= .seh_save_reg
///
/// Records a save of one GPR (x0-x19 or lr) at the given stack offset.
bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
  return false;
}
6396
/// parseDirectiveSEHSaveRegX
/// ::= .seh_save_reg_x
///
/// Records a pre-indexed save of one GPR (x0-x19 or lr).
bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
  return false;
}
6408
/// parseDirectiveSEHSaveRegP
/// ::= .seh_save_regp
///
/// Records a save of a consecutive GPR pair starting at the given register
/// (first register in x0-x19 or fp) at the given stack offset.
bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
  return false;
}
6420
/// parseDirectiveSEHSaveRegPX
/// ::= .seh_save_regp_x
///
/// Records a pre-indexed save of a consecutive GPR pair.
bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
  return false;
}
6432
/// parseDirectiveSEHSaveLRPair
/// ::= .seh_save_lrpair
///
/// Records a save of <reg, lr>; the register must be x19+2n (even offset
/// from x19), which is validated after parsing.
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Remember the register's location for the even-offset diagnostic below.
  L = getLoc();
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  if (check(((Reg - 19) % 2 != 0), L,
            "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
6448
/// parseDirectiveSEHSaveFReg
/// ::= .seh_save_freg
///
/// Records a save of one FP register (d8-d15) at the given stack offset.
bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
  return false;
}
6460
/// parseDirectiveSEHSaveFRegX
/// ::= .seh_save_freg_x
///
/// Records a pre-indexed save of one FP register (d8-d15).
bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
  return false;
}
6472
/// parseDirectiveSEHSaveFRegP
/// ::= .seh_save_fregp
///
/// Records a save of a consecutive FP register pair (first in d8-d14).
bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
  return false;
}
6484
/// parseDirectiveSEHSaveFRegPX
/// ::= .seh_save_fregp_x
///
/// Records a pre-indexed save of a consecutive FP register pair.
bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
  return false;
}
6496
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
///
/// Records establishing the frame pointer (mov fp, sp); no operands.
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
6503
/// parseDirectiveSEHAddFP
/// ::= .seh_add_fp
///
/// Records establishing the frame pointer with an offset (add fp, sp, #n).
bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
  int64_t Size;
  if (parseImmExpr(Size))
    return true;
  getTargetStreamer().emitARM64WinCFIAddFP(Size);
  return false;
}
6513
/// parseDirectiveSEHNop
/// ::= .seh_nop
///
/// Records a no-op unwind code for an instruction with no unwind effect.
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
6520
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
///
/// Records that the next register pair is saved adjacent to the previous one.
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
6527
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
///
/// Marks the beginning of an epilogue in the Win64 unwind info.
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
6534
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
///
/// Marks the end of an epilogue in the Win64 unwind info.
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
6541
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
///
/// Marks the current function as a trap frame for unwinding purposes.
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
6548
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
///
/// Records a machine frame push in the Win64 unwind info.
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
6555
/// parseDirectiveSEHContext
/// ::= .seh_context
///
/// Records that a full machine context was saved (e.g. in an exception handler).
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
6562
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
///
/// Records clearing of the "unwound to call" state in the unwind info.
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
6569
// Decompose Expr into an (ELF variant kind, Darwin variant kind, addend)
// triple.  Returns true if Expr looks like a symbol reference (possibly
// plus a constant addend) in a single syntax family; false otherwise.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an AArch64-specific wrapper (e.g. ":lo12:") first.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
6611
/// Force static initialization.
// Registers this asm parser for every AArch64 target flavor (LE, BE,
// Apple arm64, and the 32-bit ILP32 variants).
// NOTE(review): the "LLVM_EXTERNAL_VISIBILITY__attribute__..." token run
// below is the analyzer's pre-expanded form of the LLVM_EXTERNAL_VISIBILITY
// macro in the real source; it is kept byte-identical here.
extern "C" LLVM_EXTERNAL_VISIBILITY__attribute__ ((visibility("default"))) void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
  RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
  RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
}
6620
6621#define GET_REGISTER_MATCHER
6622#define GET_SUBTARGET_FEATURE_NAME
6623#define GET_MATCHER_IMPLEMENTATION
6624#define GET_MNEMONIC_SPELL_CHECKER
6625#include "AArch64GenAsmMatcher.inc"
6626
6627// Define this matcher function after the auto-generated include so we
6628// have the match class enum definitions.
6629unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
6630 unsigned Kind) {
6631 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
6632 // If the kind is a token for a literal immediate, check if our asm
6633 // operand matches. This is for InstAliases which have a fixed-value
6634 // immediate in the syntax.
6635 int64_t ExpectedVal;
1
'ExpectedVal' declared without an initial value
6636 switch (Kind) {
2
Control jumps to 'case MCK_MPR:' at line 6678
6637 default:
6638 return Match_InvalidOperand;
6639 case MCK__HASH_0:
6640 ExpectedVal = 0;
6641 break;
6642 case MCK__HASH_1:
6643 ExpectedVal = 1;
6644 break;
6645 case MCK__HASH_12:
6646 ExpectedVal = 12;
6647 break;
6648 case MCK__HASH_16:
6649 ExpectedVal = 16;
6650 break;
6651 case MCK__HASH_2:
6652 ExpectedVal = 2;
6653 break;
6654 case MCK__HASH_24:
6655 ExpectedVal = 24;
6656 break;
6657 case MCK__HASH_3:
6658 ExpectedVal = 3;
6659 break;
6660 case MCK__HASH_32:
6661 ExpectedVal = 32;
6662 break;
6663 case MCK__HASH_4:
6664 ExpectedVal = 4;
6665 break;
6666 case MCK__HASH_48:
6667 ExpectedVal = 48;
6668 break;
6669 case MCK__HASH_6:
6670 ExpectedVal = 6;
6671 break;
6672 case MCK__HASH_64:
6673 ExpectedVal = 64;
6674 break;
6675 case MCK__HASH_8:
6676 ExpectedVal = 8;
6677 break;
6678 case MCK_MPR:
6679 // If the Kind is a token for the MPR register class which has the "za"
6680 // register (SME accumulator array), check if the asm is a literal "za"
6681 // token. This is for the "smstart za" alias that defines the register
6682 // as a literal token.
6683 if (Op.isTokenEqual("za"))
3
Calling 'AArch64Operand::isTokenEqual'
6
Returning from 'AArch64Operand::isTokenEqual'
7
Taking false branch
6684 return Match_Success;
6685 break;
6686 }
6687 if (!Op.isImm())
8
Execution continues on line 6687
9
Assuming the condition is false
10
Taking false branch
6688 return Match_InvalidOperand;
6689 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
11
Assuming the object is a 'MCConstantExpr'
6690 if (!CE
11.1
'CE' is non-null
)
12
Taking false branch
6691 return Match_InvalidOperand;
6692 if (CE->getValue() == ExpectedVal)
13
The right operand of '==' is a garbage value
6693 return Match_Success;
6694 return Match_InvalidOperand;
6695}
6696
// Parse a consecutive even/odd GPR pair operand ("x0, x1" or "w2, w3")
// and push the corresponding sequence-pair super-register.  Both registers
// must be the same width, the first must have an even encoding, and the
// second must encode exactly one higher than the first.
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // Determine the width of the pair from the first register.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register's hardware encoding must be even.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  // Second register: encoding == first + 1 and same register class.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Map the even register to its covering sequence-pair super-register.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
6769
// Parse an SVE data vector register operand (e.g. "z0.s"), optionally
// followed by a vector index or a shift/extend modifier.
// ParseSuffix: require an element-type suffix; ParseShiftExtend: accept a
// trailing ", <shift/extend>" clause.
template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  unsigned RegNum;
  StringRef Kind;

  OperandMatchResultTy Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

  if (Res != MatchOperand_Success)
    return Res;

  // When a suffix is mandatory, a bare "z0" is not a match.
  if (ParseSuffix && Kind.empty())
    return MatchOperand_NoMatch;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    // An optional "[index]" may follow the register.
    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
    if (Res == MatchOperand_ParseFail)
      return MatchOperand_ParseFail;
    return MatchOperand_Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into the vector register operand.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
6821
6822OperandMatchResultTy
6823AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
6824 MCAsmParser &Parser = getParser();
6825
6826 SMLoc SS = getLoc();
6827 const AsmToken &TokE = getTok();
6828 bool IsHash = TokE.is(AsmToken::Hash);
6829
6830 if (!IsHash && TokE.isNot(AsmToken::Identifier))
6831 return MatchOperand_NoMatch;
6832
6833 int64_t Pattern;
6834 if (IsHash) {
6835 Lex(); // Eat hash
6836
6837 // Parse the immediate operand.
6838 const MCExpr *ImmVal;
6839 SS = getLoc();
6840 if (Parser.parseExpression(ImmVal))
6841 return MatchOperand_ParseFail;
6842
6843 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
6844 if (!MCE)
6845 return MatchOperand_ParseFail;
6846
6847 Pattern = MCE->getValue();
6848 } else {
6849 // Parse the pattern
6850 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
6851 if (!Pat)
6852 return MatchOperand_NoMatch;
6853
6854 Lex();
6855 Pattern = Pat->Encoding;
6856 assert(Pattern >= 0 && Pattern < 32)(static_cast <bool> (Pattern >= 0 && Pattern
< 32) ? void (0) : __assert_fail ("Pattern >= 0 && Pattern < 32"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 6856, __extension__ __PRETTY_FUNCTION__))
;
6857 }
6858
6859 Operands.push_back(
6860 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
6861 SS, getLoc(), getContext()));
6862
6863 return MatchOperand_Success;
6864}
6865
// Parse a GPR64x8 tuple operand: a single scalar x-register naming a
// consecutive 8-register group.  Only even-numbered registers in [x0,x22]
// have a matching x8 super-register.
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
  SMLoc SS = getLoc();

  unsigned XReg;
  if (tryParseScalarRegister(XReg) != MatchOperand_Success)
    return MatchOperand_NoMatch;

  MCContext &ctx = getContext();
  const MCRegisterInfo *RI = ctx.getRegisterInfo();
  // Map the base register to the 8-register tuple; fails (returns 0) if
  // the register cannot start such a tuple.
  int X8Reg = RI->getMatchingSuperReg(
      XReg, AArch64::x8sub_0,
      &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
  if (!X8Reg) {
    Error(SS, "expected an even-numbered x-register in the range [x0,x22]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
  return MatchOperand_Success;
}