Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 4249, column 15
The left operand of '==' is a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/AArch64/AsmParser -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/AArch64/AsmParser/.. 
-I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/.. -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-07-26-235520-9401-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64InstPrinter.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "TargetInfo/AArch64TargetInfo.h"
15#include "AArch64InstrInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringExtras.h"
23#include "llvm/ADT/StringMap.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/MC/MCContext.h"
28#include "llvm/MC/MCExpr.h"
29#include "llvm/MC/MCInst.h"
30#include "llvm/MC/MCLinkerOptimizationHint.h"
31#include "llvm/MC/MCObjectFileInfo.h"
32#include "llvm/MC/MCParser/MCAsmLexer.h"
33#include "llvm/MC/MCParser/MCAsmParser.h"
34#include "llvm/MC/MCParser/MCAsmParserExtension.h"
35#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
36#include "llvm/MC/MCParser/MCTargetAsmParser.h"
37#include "llvm/MC/MCRegisterInfo.h"
38#include "llvm/MC/MCStreamer.h"
39#include "llvm/MC/MCSubtargetInfo.h"
40#include "llvm/MC/MCSymbol.h"
41#include "llvm/MC/MCTargetOptions.h"
42#include "llvm/MC/SubtargetFeature.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/Compiler.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/MathExtras.h"
48#include "llvm/Support/SMLoc.h"
49#include "llvm/Support/TargetParser.h"
50#include "llvm/Support/TargetRegistry.h"
51#include "llvm/Support/raw_ostream.h"
52#include <cassert>
53#include <cctype>
54#include <cstdint>
55#include <cstdio>
56#include <string>
57#include <tuple>
58#include <utility>
59#include <vector>
60
61using namespace llvm;
62
63namespace {
64
65enum class RegKind {
66 Scalar,
67 NeonVector,
68 SVEDataVector,
69 SVEPredicateVector,
70 Matrix
71};
72
73enum class MatrixKind { Array, Tile, Row, Col };
74
75enum RegConstraintEqualityTy {
76 EqualsReg,
77 EqualsSuperReg,
78 EqualsSubReg
79};
80
81class AArch64AsmParser : public MCTargetAsmParser {
82private:
83 StringRef Mnemonic; ///< Instruction mnemonic.
84
85 // Map of register aliases registers via the .req directive.
86 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
87
88 class PrefixInfo {
89 public:
90 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
91 PrefixInfo Prefix;
92 switch (Inst.getOpcode()) {
93 case AArch64::MOVPRFX_ZZ:
94 Prefix.Active = true;
95 Prefix.Dst = Inst.getOperand(0).getReg();
96 break;
97 case AArch64::MOVPRFX_ZPmZ_B:
98 case AArch64::MOVPRFX_ZPmZ_H:
99 case AArch64::MOVPRFX_ZPmZ_S:
100 case AArch64::MOVPRFX_ZPmZ_D:
101 Prefix.Active = true;
102 Prefix.Predicated = true;
103 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
104 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 105, __extension__ __PRETTY_FUNCTION__))
105 "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 105, __extension__ __PRETTY_FUNCTION__))
;
106 Prefix.Dst = Inst.getOperand(0).getReg();
107 Prefix.Pg = Inst.getOperand(2).getReg();
108 break;
109 case AArch64::MOVPRFX_ZPzZ_B:
110 case AArch64::MOVPRFX_ZPzZ_H:
111 case AArch64::MOVPRFX_ZPzZ_S:
112 case AArch64::MOVPRFX_ZPzZ_D:
113 Prefix.Active = true;
114 Prefix.Predicated = true;
115 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
116 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 117, __extension__ __PRETTY_FUNCTION__))
117 "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 117, __extension__ __PRETTY_FUNCTION__))
;
118 Prefix.Dst = Inst.getOperand(0).getReg();
119 Prefix.Pg = Inst.getOperand(1).getReg();
120 break;
121 default:
122 break;
123 }
124
125 return Prefix;
126 }
127
128 PrefixInfo() : Active(false), Predicated(false) {}
129 bool isActive() const { return Active; }
130 bool isPredicated() const { return Predicated; }
131 unsigned getElementSize() const {
132 assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail
("Predicated", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 132, __extension__ __PRETTY_FUNCTION__))
;
133 return ElementSize;
134 }
135 unsigned getDstReg() const { return Dst; }
136 unsigned getPgReg() const {
137 assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail
("Predicated", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 137, __extension__ __PRETTY_FUNCTION__))
;
138 return Pg;
139 }
140
141 private:
142 bool Active;
143 bool Predicated;
144 unsigned ElementSize;
145 unsigned Dst;
146 unsigned Pg;
147 } NextPrefix;
148
149 AArch64TargetStreamer &getTargetStreamer() {
150 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
151 return static_cast<AArch64TargetStreamer &>(TS);
152 }
153
154 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
155
156 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
157 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
158 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
159 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
160 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
161 bool parseRegister(OperandVector &Operands);
162 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
163 bool parseNeonVectorList(OperandVector &Operands);
164 bool parseOptionalMulOperand(OperandVector &Operands);
165 bool parseKeywordOperand(OperandVector &Operands);
166 bool parseOperand(OperandVector &Operands, bool isCondCode,
167 bool invertCondCode);
168 bool parseImmExpr(int64_t &Out);
169 bool parseComma();
170 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
171 unsigned Last);
172
173 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
174 OperandVector &Operands);
175
176 bool parseDirectiveArch(SMLoc L);
177 bool parseDirectiveArchExtension(SMLoc L);
178 bool parseDirectiveCPU(SMLoc L);
179 bool parseDirectiveInst(SMLoc L);
180
181 bool parseDirectiveTLSDescCall(SMLoc L);
182
183 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
184 bool parseDirectiveLtorg(SMLoc L);
185
186 bool parseDirectiveReq(StringRef Name, SMLoc L);
187 bool parseDirectiveUnreq(SMLoc L);
188 bool parseDirectiveCFINegateRAState();
189 bool parseDirectiveCFIBKeyFrame();
190
191 bool parseDirectiveVariantPCS(SMLoc L);
192
193 bool parseDirectiveSEHAllocStack(SMLoc L);
194 bool parseDirectiveSEHPrologEnd(SMLoc L);
195 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
196 bool parseDirectiveSEHSaveFPLR(SMLoc L);
197 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
198 bool parseDirectiveSEHSaveReg(SMLoc L);
199 bool parseDirectiveSEHSaveRegX(SMLoc L);
200 bool parseDirectiveSEHSaveRegP(SMLoc L);
201 bool parseDirectiveSEHSaveRegPX(SMLoc L);
202 bool parseDirectiveSEHSaveLRPair(SMLoc L);
203 bool parseDirectiveSEHSaveFReg(SMLoc L);
204 bool parseDirectiveSEHSaveFRegX(SMLoc L);
205 bool parseDirectiveSEHSaveFRegP(SMLoc L);
206 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
207 bool parseDirectiveSEHSetFP(SMLoc L);
208 bool parseDirectiveSEHAddFP(SMLoc L);
209 bool parseDirectiveSEHNop(SMLoc L);
210 bool parseDirectiveSEHSaveNext(SMLoc L);
211 bool parseDirectiveSEHEpilogStart(SMLoc L);
212 bool parseDirectiveSEHEpilogEnd(SMLoc L);
213 bool parseDirectiveSEHTrapFrame(SMLoc L);
214 bool parseDirectiveSEHMachineFrame(SMLoc L);
215 bool parseDirectiveSEHContext(SMLoc L);
216 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
217
218 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
219 SmallVectorImpl<SMLoc> &Loc);
220 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
221 OperandVector &Operands, MCStreamer &Out,
222 uint64_t &ErrorInfo,
223 bool MatchingInlineAsm) override;
224/// @name Auto-generated Match Functions
225/// {
226
227#define GET_ASSEMBLER_HEADER
228#include "AArch64GenAsmMatcher.inc"
229
230 /// }
231
232 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
233 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
234 RegKind MatchKind);
235 OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
236 OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
237 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
238 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
239 OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
240 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
241 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
242 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
243 template <bool IsSVEPrefetch = false>
244 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
245 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
246 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
247 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
248 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
249 template<bool AddFPZeroAsLiteral>
250 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
251 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
252 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
253 bool tryParseNeonVectorRegister(OperandVector &Operands);
254 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
255 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
256 template <bool ParseShiftExtend,
257 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
258 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
259 template <bool ParseShiftExtend, bool ParseSuffix>
260 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
261 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
262 template <RegKind VectorKind>
263 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
264 bool ExpectMatch = false);
265 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
266 OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
267
268public:
269 enum AArch64MatchResultTy {
270 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
271#define GET_OPERAND_DIAGNOSTIC_TYPES
272#include "AArch64GenAsmMatcher.inc"
273 };
274 bool IsILP32;
275
276 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
277 const MCInstrInfo &MII, const MCTargetOptions &Options)
278 : MCTargetAsmParser(Options, STI, MII) {
279 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
280 MCAsmParserExtension::Initialize(Parser);
281 MCStreamer &S = getParser().getStreamer();
282 if (S.getTargetStreamer() == nullptr)
283 new AArch64TargetStreamer(S);
284
285 // Alias .hword/.word/.[dx]word to the target-independent
286 // .2byte/.4byte/.8byte directives as they have the same form and
287 // semantics:
288 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
289 Parser.addAliasForDirective(".hword", ".2byte");
290 Parser.addAliasForDirective(".word", ".4byte");
291 Parser.addAliasForDirective(".dword", ".8byte");
292 Parser.addAliasForDirective(".xword", ".8byte");
293
294 // Initialize the set of available features.
295 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
296 }
297
298 bool regsEqual(const MCParsedAsmOperand &Op1,
299 const MCParsedAsmOperand &Op2) const override;
300 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
301 SMLoc NameLoc, OperandVector &Operands) override;
302 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
303 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
304 SMLoc &EndLoc) override;
305 bool ParseDirective(AsmToken DirectiveID) override;
306 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
307 unsigned Kind) override;
308
309 static bool classifySymbolRef(const MCExpr *Expr,
310 AArch64MCExpr::VariantKind &ELFRefKind,
311 MCSymbolRefExpr::VariantKind &DarwinRefKind,
312 int64_t &Addend);
313};
314
315/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
316/// instruction.
317class AArch64Operand : public MCParsedAsmOperand {
318private:
319 enum KindTy {
320 k_Immediate,
321 k_ShiftedImm,
322 k_CondCode,
323 k_Register,
324 k_MatrixRegister,
325 k_SVCR,
326 k_VectorList,
327 k_VectorIndex,
328 k_Token,
329 k_SysReg,
330 k_SysCR,
331 k_Prefetch,
332 k_ShiftExtend,
333 k_FPImm,
334 k_Barrier,
335 k_PSBHint,
336 k_BTIHint,
337 } Kind;
338
339 SMLoc StartLoc, EndLoc;
340
341 struct TokOp {
342 const char *Data;
343 unsigned Length;
344 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
345 };
346
347 // Separate shift/extend operand.
348 struct ShiftExtendOp {
349 AArch64_AM::ShiftExtendType Type;
350 unsigned Amount;
351 bool HasExplicitAmount;
352 };
353
354 struct RegOp {
355 unsigned RegNum;
356 RegKind Kind;
357 int ElementWidth;
358
359 // The register may be allowed as a different register class,
360 // e.g. for GPR64as32 or GPR32as64.
361 RegConstraintEqualityTy EqualityTy;
362
363 // In some cases the shift/extend needs to be explicitly parsed together
364 // with the register, rather than as a separate operand. This is needed
365 // for addressing modes where the instruction as a whole dictates the
366 // scaling/extend, rather than specific bits in the instruction.
367 // By parsing them as a single operand, we avoid the need to pass an
368 // extra operand in all CodeGen patterns (because all operands need to
369 // have an associated value), and we avoid the need to update TableGen to
370 // accept operands that have no associated bits in the instruction.
371 //
372 // An added benefit of parsing them together is that the assembler
373 // can give a sensible diagnostic if the scaling is not correct.
374 //
375 // The default is 'lsl #0' (HasExplicitAmount = false) if no
376 // ShiftExtend is specified.
377 ShiftExtendOp ShiftExtend;
378 };
379
380 struct MatrixRegOp {
381 unsigned RegNum;
382 unsigned ElementWidth;
383 MatrixKind Kind;
384 };
385
386 struct VectorListOp {
387 unsigned RegNum;
388 unsigned Count;
389 unsigned NumElements;
390 unsigned ElementWidth;
391 RegKind RegisterKind;
392 };
393
394 struct VectorIndexOp {
395 int Val;
396 };
397
398 struct ImmOp {
399 const MCExpr *Val;
400 };
401
402 struct ShiftedImmOp {
403 const MCExpr *Val;
404 unsigned ShiftAmount;
405 };
406
407 struct CondCodeOp {
408 AArch64CC::CondCode Code;
409 };
410
411 struct FPImmOp {
412 uint64_t Val; // APFloat value bitcasted to uint64_t.
413 bool IsExact; // describes whether parsed value was exact.
414 };
415
416 struct BarrierOp {
417 const char *Data;
418 unsigned Length;
419 unsigned Val; // Not the enum since not all values have names.
420 bool HasnXSModifier;
421 };
422
423 struct SysRegOp {
424 const char *Data;
425 unsigned Length;
426 uint32_t MRSReg;
427 uint32_t MSRReg;
428 uint32_t PStateField;
429 };
430
431 struct SysCRImmOp {
432 unsigned Val;
433 };
434
435 struct PrefetchOp {
436 const char *Data;
437 unsigned Length;
438 unsigned Val;
439 };
440
441 struct PSBHintOp {
442 const char *Data;
443 unsigned Length;
444 unsigned Val;
445 };
446
447 struct BTIHintOp {
448 const char *Data;
449 unsigned Length;
450 unsigned Val;
451 };
452
453 struct SVCROp {
454 const char *Data;
455 unsigned Length;
456 unsigned PStateField;
457 };
458
459 union {
460 struct TokOp Tok;
461 struct RegOp Reg;
462 struct MatrixRegOp MatrixReg;
463 struct VectorListOp VectorList;
464 struct VectorIndexOp VectorIndex;
465 struct ImmOp Imm;
466 struct ShiftedImmOp ShiftedImm;
467 struct CondCodeOp CondCode;
468 struct FPImmOp FPImm;
469 struct BarrierOp Barrier;
470 struct SysRegOp SysReg;
471 struct SysCRImmOp SysCRImm;
472 struct PrefetchOp Prefetch;
473 struct PSBHintOp PSBHint;
474 struct BTIHintOp BTIHint;
475 struct ShiftExtendOp ShiftExtend;
476 struct SVCROp SVCR;
477 };
478
479 // Keep the MCContext around as the MCExprs may need manipulated during
480 // the add<>Operands() calls.
481 MCContext &Ctx;
482
483public:
484 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
485
486 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
487 Kind = o.Kind;
488 StartLoc = o.StartLoc;
489 EndLoc = o.EndLoc;
490 switch (Kind) {
491 case k_Token:
492 Tok = o.Tok;
493 break;
494 case k_Immediate:
495 Imm = o.Imm;
496 break;
497 case k_ShiftedImm:
498 ShiftedImm = o.ShiftedImm;
499 break;
500 case k_CondCode:
501 CondCode = o.CondCode;
502 break;
503 case k_FPImm:
504 FPImm = o.FPImm;
505 break;
506 case k_Barrier:
507 Barrier = o.Barrier;
508 break;
509 case k_Register:
510 Reg = o.Reg;
511 break;
512 case k_MatrixRegister:
513 MatrixReg = o.MatrixReg;
514 break;
515 case k_VectorList:
516 VectorList = o.VectorList;
517 break;
518 case k_VectorIndex:
519 VectorIndex = o.VectorIndex;
520 break;
521 case k_SysReg:
522 SysReg = o.SysReg;
523 break;
524 case k_SysCR:
525 SysCRImm = o.SysCRImm;
526 break;
527 case k_Prefetch:
528 Prefetch = o.Prefetch;
529 break;
530 case k_PSBHint:
531 PSBHint = o.PSBHint;
532 break;
533 case k_BTIHint:
534 BTIHint = o.BTIHint;
535 break;
536 case k_ShiftExtend:
537 ShiftExtend = o.ShiftExtend;
538 break;
539 case k_SVCR:
540 SVCR = o.SVCR;
541 break;
542 }
543 }
544
545 /// getStartLoc - Get the location of the first token of this operand.
546 SMLoc getStartLoc() const override { return StartLoc; }
547 /// getEndLoc - Get the location of the last token of this operand.
548 SMLoc getEndLoc() const override { return EndLoc; }
549
550 StringRef getToken() const {
551 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 551, __extension__ __PRETTY_FUNCTION__))
;
552 return StringRef(Tok.Data, Tok.Length);
553 }
554
555 bool isTokenSuffix() const {
556 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 556, __extension__ __PRETTY_FUNCTION__))
;
557 return Tok.IsSuffix;
558 }
559
560 const MCExpr *getImm() const {
561 assert(Kind == k_Immediate && "Invalid access!")(static_cast <bool> (Kind == k_Immediate && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 561, __extension__ __PRETTY_FUNCTION__))
;
562 return Imm.Val;
563 }
564
565 const MCExpr *getShiftedImmVal() const {
566 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 566, __extension__ __PRETTY_FUNCTION__))
;
567 return ShiftedImm.Val;
568 }
569
570 unsigned getShiftedImmShift() const {
571 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 571, __extension__ __PRETTY_FUNCTION__))
;
572 return ShiftedImm.ShiftAmount;
573 }
574
575 AArch64CC::CondCode getCondCode() const {
576 assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 576, __extension__ __PRETTY_FUNCTION__))
;
577 return CondCode.Code;
578 }
579
580 APFloat getFPImm() const {
581 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 581, __extension__ __PRETTY_FUNCTION__))
;
582 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
583 }
584
585 bool getFPImmIsExact() const {
586 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 586, __extension__ __PRETTY_FUNCTION__))
;
587 return FPImm.IsExact;
588 }
589
590 unsigned getBarrier() const {
591 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 591, __extension__ __PRETTY_FUNCTION__))
;
592 return Barrier.Val;
593 }
594
595 StringRef getBarrierName() const {
596 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 596, __extension__ __PRETTY_FUNCTION__))
;
597 return StringRef(Barrier.Data, Barrier.Length);
598 }
599
600 bool getBarriernXSModifier() const {
601 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 601, __extension__ __PRETTY_FUNCTION__))
;
602 return Barrier.HasnXSModifier;
603 }
604
605 unsigned getReg() const override {
606 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 606, __extension__ __PRETTY_FUNCTION__))
;
607 return Reg.RegNum;
608 }
609
610 unsigned getMatrixReg() const {
611 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 611, __extension__ __PRETTY_FUNCTION__))
;
612 return MatrixReg.RegNum;
613 }
614
615 unsigned getMatrixElementWidth() const {
616 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 616, __extension__ __PRETTY_FUNCTION__))
;
617 return MatrixReg.ElementWidth;
618 }
619
620 MatrixKind getMatrixKind() const {
621 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 621, __extension__ __PRETTY_FUNCTION__))
;
622 return MatrixReg.Kind;
623 }
624
625 RegConstraintEqualityTy getRegEqualityTy() const {
626 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 626, __extension__ __PRETTY_FUNCTION__))
;
627 return Reg.EqualityTy;
628 }
629
// --- Kind-checked accessors for the discriminated operand union. ---
// Each getter asserts that this operand currently holds the matching
// kind before reading the corresponding union member.

/// First register of a vector-list operand.
unsigned getVectorListStart() const {
  assert(Kind == k_VectorList && "Invalid access!");
  return VectorList.RegNum;
}

/// Number of registers covered by a vector-list operand.
unsigned getVectorListCount() const {
  assert(Kind == k_VectorList && "Invalid access!");
  return VectorList.Count;
}

/// Lane number of a vector-index operand.
int getVectorIndex() const {
  assert(Kind == k_VectorIndex && "Invalid access!");
  return VectorIndex.Val;
}

/// Textual name of a system-register operand.
StringRef getSysReg() const {
  assert(Kind == k_SysReg && "Invalid access!");
  return StringRef(SysReg.Data, SysReg.Length);
}

/// Immediate value of a SYS CRn/CRm operand.
unsigned getSysCR() const {
  assert(Kind == k_SysCR && "Invalid access!");
  return SysCRImm.Val;
}

/// Encoded value of a prefetch-hint operand.
unsigned getPrefetch() const {
  assert(Kind == k_Prefetch && "Invalid access!");
  return Prefetch.Val;
}

/// Encoded value of a PSB hint operand.
unsigned getPSBHint() const {
  assert(Kind == k_PSBHint && "Invalid access!");
  return PSBHint.Val;
}

/// Textual name of a PSB hint operand.
StringRef getPSBHintName() const {
  assert(Kind == k_PSBHint && "Invalid access!");
  return StringRef(PSBHint.Data, PSBHint.Length);
}

/// Encoded value of a BTI hint operand.
unsigned getBTIHint() const {
  assert(Kind == k_BTIHint && "Invalid access!");
  return BTIHint.Val;
}

/// Textual name of a BTI hint operand.
StringRef getBTIHintName() const {
  assert(Kind == k_BTIHint && "Invalid access!");
  return StringRef(BTIHint.Data, BTIHint.Length);
}

/// Textual name of an SVCR operand.
StringRef getSVCR() const {
  assert(Kind == k_SVCR && "Invalid access!");
  return StringRef(SVCR.Data, SVCR.Length);
}

/// Textual name of a prefetch-hint operand.
StringRef getPrefetchName() const {
  assert(Kind == k_Prefetch && "Invalid access!");
  return StringRef(Prefetch.Data, Prefetch.Length);
}
689
690 AArch64_AM::ShiftExtendType getShiftExtendType() const {
691 if (Kind == k_ShiftExtend)
692 return ShiftExtend.Type;
693 if (Kind == k_Register)
694 return Reg.ShiftExtend.Type;
695 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 695)
;
696 }
697
698 unsigned getShiftExtendAmount() const {
699 if (Kind == k_ShiftExtend)
700 return ShiftExtend.Amount;
701 if (Kind == k_Register)
702 return Reg.ShiftExtend.Amount;
703 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 703)
;
704 }
705
706 bool hasShiftExtendAmount() const {
707 if (Kind == k_ShiftExtend)
708 return ShiftExtend.HasExplicitAmount;
709 if (Kind == k_Register)
710 return Reg.ShiftExtend.HasExplicitAmount;
711 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 711)
;
712 }
713
714 bool isImm() const override { return Kind == k_Immediate; }
715 bool isMem() const override { return false; }
716
717 bool isUImm6() const {
718 if (!isImm())
719 return false;
720 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
721 if (!MCE)
722 return false;
723 int64_t Val = MCE->getValue();
724 return (Val >= 0 && Val < 64);
725 }
726
727 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
728
729 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
730 return isImmScaled<Bits, Scale>(true);
731 }
732
733 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
734 return isImmScaled<Bits, Scale>(false);
735 }
736
737 template <int Bits, int Scale>
738 DiagnosticPredicate isImmScaled(bool Signed) const {
739 if (!isImm())
740 return DiagnosticPredicateTy::NoMatch;
741
742 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
743 if (!MCE)
744 return DiagnosticPredicateTy::NoMatch;
745
746 int64_t MinVal, MaxVal;
747 if (Signed) {
748 int64_t Shift = Bits - 1;
749 MinVal = (int64_t(1) << Shift) * -Scale;
750 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
751 } else {
752 MinVal = 0;
753 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
754 }
755
756 int64_t Val = MCE->getValue();
757 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
758 return DiagnosticPredicateTy::Match;
759
760 return DiagnosticPredicateTy::NearMatch;
761 }
762
763 DiagnosticPredicate isSVEPattern() const {
764 if (!isImm())
765 return DiagnosticPredicateTy::NoMatch;
766 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
767 if (!MCE)
768 return DiagnosticPredicateTy::NoMatch;
769 int64_t Val = MCE->getValue();
770 if (Val >= 0 && Val < 32)
771 return DiagnosticPredicateTy::Match;
772 return DiagnosticPredicateTy::NearMatch;
773 }
774
775 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
776 AArch64MCExpr::VariantKind ELFRefKind;
777 MCSymbolRefExpr::VariantKind DarwinRefKind;
778 int64_t Addend;
779 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
780 Addend)) {
781 // If we don't understand the expression, assume the best and
782 // let the fixup and relocation code deal with it.
783 return true;
784 }
785
786 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
787 ELFRefKind == AArch64MCExpr::VK_LO12 ||
788 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
789 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
790 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
791 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
792 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
793 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
794 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
795 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
796 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
797 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
798 // Note that we don't range-check the addend. It's adjusted modulo page
799 // size when converted, so there is no "out of range" condition when using
800 // @pageoff.
801 return true;
802 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
803 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
804 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
805 return Addend == 0;
806 }
807
808 return false;
809 }
810
811 template <int Scale> bool isUImm12Offset() const {
812 if (!isImm())
813 return false;
814
815 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
816 if (!MCE)
817 return isSymbolicUImm12Offset(getImm());
818
819 int64_t Val = MCE->getValue();
820 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
821 }
822
823 template <int N, int M>
824 bool isImmInRange() const {
825 if (!isImm())
826 return false;
827 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
828 if (!MCE)
829 return false;
830 int64_t Val = MCE->getValue();
831 return (Val >= N && Val <= M);
832 }
833
834 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
835 // a logical immediate can always be represented when inverted.
836 template <typename T>
837 bool isLogicalImm() const {
838 if (!isImm())
839 return false;
840 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
841 if (!MCE)
842 return false;
843
844 int64_t Val = MCE->getValue();
845 // Avoid left shift by 64 directly.
846 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
847 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
848 if ((Val & Upper) && (Val & Upper) != Upper)
849 return false;
850
851 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
852 }
853
854 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
855
856 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
857 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
858 /// immediate that can be shifted by 'Shift'.
859 template <unsigned Width>
860 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
861 if (isShiftedImm() && Width == getShiftedImmShift())
862 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
863 return std::make_pair(CE->getValue(), Width);
864
865 if (isImm())
866 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
867 int64_t Val = CE->getValue();
868 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
869 return std::make_pair(Val >> Width, Width);
870 else
871 return std::make_pair(Val, 0u);
872 }
873
874 return {};
875 }
876
877 bool isAddSubImm() const {
878 if (!isShiftedImm() && !isImm())
879 return false;
880
881 const MCExpr *Expr;
882
883 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
884 if (isShiftedImm()) {
885 unsigned Shift = ShiftedImm.ShiftAmount;
886 Expr = ShiftedImm.Val;
887 if (Shift != 0 && Shift != 12)
888 return false;
889 } else {
890 Expr = getImm();
891 }
892
893 AArch64MCExpr::VariantKind ELFRefKind;
894 MCSymbolRefExpr::VariantKind DarwinRefKind;
895 int64_t Addend;
896 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
897 DarwinRefKind, Addend)) {
898 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
899 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
900 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
901 || ELFRefKind == AArch64MCExpr::VK_LO12
902 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
903 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
904 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
905 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
906 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
907 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
908 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
909 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
910 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
911 }
912
913 // If it's a constant, it should be a real immediate in range.
914 if (auto ShiftedVal = getShiftedVal<12>())
915 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
916
917 // If it's an expression, we hope for the best and let the fixup/relocation
918 // code deal with it.
919 return true;
920 }
921
922 bool isAddSubImmNeg() const {
923 if (!isShiftedImm() && !isImm())
924 return false;
925
926 // Otherwise it should be a real negative immediate in range.
927 if (auto ShiftedVal = getShiftedVal<12>())
928 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
929
930 return false;
931 }
932
933 // Signed value in the range -128 to +127. For element widths of
934 // 16 bits or higher it may also be a signed multiple of 256 in the
935 // range -32768 to +32512.
936 // For element-width of 8 bits a range of -128 to 255 is accepted,
937 // since a copy of a byte can be either signed/unsigned.
938 template <typename T>
939 DiagnosticPredicate isSVECpyImm() const {
940 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
941 return DiagnosticPredicateTy::NoMatch;
942
943 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
944 std::is_same<int8_t, T>::value;
945 if (auto ShiftedImm = getShiftedVal<8>())
946 if (!(IsByte && ShiftedImm->second) &&
947 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
948 << ShiftedImm->second))
949 return DiagnosticPredicateTy::Match;
950
951 return DiagnosticPredicateTy::NearMatch;
952 }
953
954 // Unsigned value in the range 0 to 255. For element widths of
955 // 16 bits or higher it may also be a signed multiple of 256 in the
956 // range 0 to 65280.
957 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
958 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
959 return DiagnosticPredicateTy::NoMatch;
960
961 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
962 std::is_same<int8_t, T>::value;
963 if (auto ShiftedImm = getShiftedVal<8>())
964 if (!(IsByte && ShiftedImm->second) &&
965 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
966 << ShiftedImm->second))
967 return DiagnosticPredicateTy::Match;
968
969 return DiagnosticPredicateTy::NearMatch;
970 }
971
972 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
973 if (isLogicalImm<T>() && !isSVECpyImm<T>())
974 return DiagnosticPredicateTy::Match;
975 return DiagnosticPredicateTy::NoMatch;
976 }
977
978 bool isCondCode() const { return Kind == k_CondCode; }
979
980 bool isSIMDImmType10() const {
981 if (!isImm())
982 return false;
983 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
984 if (!MCE)
985 return false;
986 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
987 }
988
989 template<int N>
990 bool isBranchTarget() const {
991 if (!isImm())
992 return false;
993 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
994 if (!MCE)
995 return true;
996 int64_t Val = MCE->getValue();
997 if (Val & 0x3)
998 return false;
999 assert(N > 0 && "Branch target immediate cannot be 0 bits!")(static_cast <bool> (N > 0 && "Branch target immediate cannot be 0 bits!"
) ? void (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 999, __extension__ __PRETTY_FUNCTION__))
;
1000 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1001 }
1002
1003 bool
1004 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1005 if (!isImm())
1006 return false;
1007
1008 AArch64MCExpr::VariantKind ELFRefKind;
1009 MCSymbolRefExpr::VariantKind DarwinRefKind;
1010 int64_t Addend;
1011 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1012 DarwinRefKind, Addend)) {
1013 return false;
1014 }
1015 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1016 return false;
1017
1018 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
1019 if (ELFRefKind == AllowedModifiers[i])
1020 return true;
1021 }
1022
1023 return false;
1024 }
1025
1026 bool isMovWSymbolG3() const {
1027 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1028 }
1029
1030 bool isMovWSymbolG2() const {
1031 return isMovWSymbol(
1032 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1033 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1034 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1035 AArch64MCExpr::VK_DTPREL_G2});
1036 }
1037
1038 bool isMovWSymbolG1() const {
1039 return isMovWSymbol(
1040 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1041 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1042 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1043 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1044 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1045 }
1046
1047 bool isMovWSymbolG0() const {
1048 return isMovWSymbol(
1049 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1050 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1051 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1052 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1053 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1054 }
1055
1056 template<int RegWidth, int Shift>
1057 bool isMOVZMovAlias() const {
1058 if (!isImm()) return false;
1059
1060 const MCExpr *E = getImm();
1061 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1062 uint64_t Value = CE->getValue();
1063
1064 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1065 }
1066 // Only supports the case of Shift being 0 if an expression is used as an
1067 // operand
1068 return !Shift && E;
1069 }
1070
1071 template<int RegWidth, int Shift>
1072 bool isMOVNMovAlias() const {
1073 if (!isImm()) return false;
1074
1075 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1076 if (!CE) return false;
1077 uint64_t Value = CE->getValue();
1078
1079 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1080 }
1081
1082 bool isFPImm() const {
1083 return Kind == k_FPImm &&
1084 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1085 }
1086
1087 bool isBarrier() const {
1088 return Kind == k_Barrier && !getBarriernXSModifier();
1089 }
1090 bool isBarriernXS() const {
1091 return Kind == k_Barrier && getBarriernXSModifier();
1092 }
1093 bool isSysReg() const { return Kind == k_SysReg; }
1094
1095 bool isMRSSystemRegister() const {
1096 if (!isSysReg()) return false;
1097
1098 return SysReg.MRSReg != -1U;
1099 }
1100
1101 bool isMSRSystemRegister() const {
1102 if (!isSysReg()) return false;
1103 return SysReg.MSRReg != -1U;
1104 }
1105
1106 bool isSystemPStateFieldWithImm0_1() const {
1107 if (!isSysReg()) return false;
1108 return (SysReg.PStateField == AArch64PState::PAN ||
1109 SysReg.PStateField == AArch64PState::DIT ||
1110 SysReg.PStateField == AArch64PState::UAO ||
1111 SysReg.PStateField == AArch64PState::SSBS);
1112 }
1113
1114 bool isSystemPStateFieldWithImm0_15() const {
1115 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1116 return SysReg.PStateField != -1U;
1117 }
1118
1119 bool isSVCR() const {
1120 if (Kind != k_SVCR)
1121 return false;
1122 return SVCR.PStateField != -1U;
1123 }
1124
1125 bool isReg() const override {
1126 return Kind == k_Register;
1127 }
1128
1129 bool isScalarReg() const {
1130 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1131 }
1132
1133 bool isNeonVectorReg() const {
1134 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1135 }
1136
1137 bool isNeonVectorRegLo() const {
1138 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1139 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1140 Reg.RegNum) ||
1141 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1142 Reg.RegNum));
1143 }
1144
1145 bool isMatrix() const { return Kind == k_MatrixRegister; }
1146
1147 template <unsigned Class> bool isSVEVectorReg() const {
1148 RegKind RK;
1149 switch (Class) {
1150 case AArch64::ZPRRegClassID:
1151 case AArch64::ZPR_3bRegClassID:
1152 case AArch64::ZPR_4bRegClassID:
1153 RK = RegKind::SVEDataVector;
1154 break;
1155 case AArch64::PPRRegClassID:
1156 case AArch64::PPR_3bRegClassID:
1157 RK = RegKind::SVEPredicateVector;
1158 break;
1159 default:
1160 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1160)
;
1161 }
1162
1163 return (Kind == k_Register && Reg.Kind == RK) &&
1164 AArch64MCRegisterClasses[Class].contains(getReg());
1165 }
1166
1167 template <unsigned Class> bool isFPRasZPR() const {
1168 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1169 AArch64MCRegisterClasses[Class].contains(getReg());
1170 }
1171
1172 template <int ElementWidth, unsigned Class>
1173 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1174 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1175 return DiagnosticPredicateTy::NoMatch;
1176
1177 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1178 return DiagnosticPredicateTy::Match;
1179
1180 return DiagnosticPredicateTy::NearMatch;
1181 }
1182
1183 template <int ElementWidth, unsigned Class>
1184 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1185 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1186 return DiagnosticPredicateTy::NoMatch;
1187
1188 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1189 return DiagnosticPredicateTy::Match;
1190
1191 return DiagnosticPredicateTy::NearMatch;
1192 }
1193
1194 template <int ElementWidth, unsigned Class,
1195 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1196 bool ShiftWidthAlwaysSame>
1197 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1198 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1199 if (!VectorMatch.isMatch())
1200 return DiagnosticPredicateTy::NoMatch;
1201
1202 // Give a more specific diagnostic when the user has explicitly typed in
1203 // a shift-amount that does not match what is expected, but for which
1204 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1205 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1206 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1207 ShiftExtendTy == AArch64_AM::SXTW) &&
1208 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1209 return DiagnosticPredicateTy::NoMatch;
1210
1211 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1212 return DiagnosticPredicateTy::Match;
1213
1214 return DiagnosticPredicateTy::NearMatch;
1215 }
1216
1217 bool isGPR32as64() const {
1218 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1219 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1220 }
1221
1222 bool isGPR64as32() const {
1223 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1224 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1225 }
1226
1227 bool isGPR64x8() const {
1228 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1229 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1230 Reg.RegNum);
1231 }
1232
1233 bool isWSeqPair() const {
1234 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1235 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1236 Reg.RegNum);
1237 }
1238
1239 bool isXSeqPair() const {
1240 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1241 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1242 Reg.RegNum);
1243 }
1244
1245 template<int64_t Angle, int64_t Remainder>
1246 DiagnosticPredicate isComplexRotation() const {
1247 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1248
1249 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1250 if (!CE) return DiagnosticPredicateTy::NoMatch;
1251 uint64_t Value = CE->getValue();
1252
1253 if (Value % Angle == Remainder && Value <= 270)
1254 return DiagnosticPredicateTy::Match;
1255 return DiagnosticPredicateTy::NearMatch;
1256 }
1257
1258 template <unsigned RegClassID> bool isGPR64() const {
1259 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1260 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1261 }
1262
1263 template <unsigned RegClassID, int ExtWidth>
1264 DiagnosticPredicate isGPR64WithShiftExtend() const {
1265 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1266 return DiagnosticPredicateTy::NoMatch;
1267
1268 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1269 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1270 return DiagnosticPredicateTy::Match;
1271 return DiagnosticPredicateTy::NearMatch;
1272 }
1273
1274 /// Is this a vector list with the type implicit (presumably attached to the
1275 /// instruction itself)?
1276 template <RegKind VectorKind, unsigned NumRegs>
1277 bool isImplicitlyTypedVectorList() const {
1278 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1279 VectorList.NumElements == 0 &&
1280 VectorList.RegisterKind == VectorKind;
1281 }
1282
1283 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1284 unsigned ElementWidth>
1285 bool isTypedVectorList() const {
1286 if (Kind != k_VectorList)
1287 return false;
1288 if (VectorList.Count != NumRegs)
1289 return false;
1290 if (VectorList.RegisterKind != VectorKind)
1291 return false;
1292 if (VectorList.ElementWidth != ElementWidth)
1293 return false;
1294 return VectorList.NumElements == NumElements;
1295 }
1296
1297 template <int Min, int Max>
1298 DiagnosticPredicate isVectorIndex() const {
1299 if (Kind != k_VectorIndex)
1300 return DiagnosticPredicateTy::NoMatch;
1301 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1302 return DiagnosticPredicateTy::Match;
1303 return DiagnosticPredicateTy::NearMatch;
1304 }
1305
1306 bool isToken() const override { return Kind == k_Token; }
1307
1308 bool isTokenEqual(StringRef Str) const {
1309 return Kind == k_Token && getToken() == Str;
1310 }
1311 bool isSysCR() const { return Kind == k_SysCR; }
1312 bool isPrefetch() const { return Kind == k_Prefetch; }
1313 bool isPSBHint() const { return Kind == k_PSBHint; }
1314 bool isBTIHint() const { return Kind == k_BTIHint; }
1315 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1316 bool isShifter() const {
1317 if (!isShiftExtend())
1318 return false;
1319
1320 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1321 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1322 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1323 ST == AArch64_AM::MSL);
1324 }
1325
1326 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1327 if (Kind != k_FPImm)
1328 return DiagnosticPredicateTy::NoMatch;
1329
1330 if (getFPImmIsExact()) {
1331 // Lookup the immediate from table of supported immediates.
1332 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1333 assert(Desc && "Unknown enum value")(static_cast <bool> (Desc && "Unknown enum value"
) ? void (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1333, __extension__ __PRETTY_FUNCTION__))
;
1334
1335 // Calculate its FP value.
1336 APFloat RealVal(APFloat::IEEEdouble());
1337 auto StatusOrErr =
1338 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1339 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1340 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1340)
;
1341
1342 if (getFPImm().bitwiseIsEqual(RealVal))
1343 return DiagnosticPredicateTy::Match;
1344 }
1345
1346 return DiagnosticPredicateTy::NearMatch;
1347 }
1348
1349 template <unsigned ImmA, unsigned ImmB>
1350 DiagnosticPredicate isExactFPImm() const {
1351 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1352 if ((Res = isExactFPImm<ImmA>()))
1353 return DiagnosticPredicateTy::Match;
1354 if ((Res = isExactFPImm<ImmB>()))
1355 return DiagnosticPredicateTy::Match;
1356 return Res;
1357 }
1358
1359 bool isExtend() const {
1360 if (!isShiftExtend())
1361 return false;
1362
1363 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1364 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1365 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1366 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1367 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1368 ET == AArch64_AM::LSL) &&
1369 getShiftExtendAmount() <= 4;
1370 }
1371
1372 bool isExtend64() const {
1373 if (!isExtend())
1374 return false;
1375 // Make sure the extend expects a 32-bit source register.
1376 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1377 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1378 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1379 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1380 }
1381
1382 bool isExtendLSL64() const {
1383 if (!isExtend())
1384 return false;
1385 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1386 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1387 ET == AArch64_AM::LSL) &&
1388 getShiftExtendAmount() <= 4;
1389 }
1390
1391 template<int Width> bool isMemXExtend() const {
1392 if (!isExtend())
1393 return false;
1394 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1395 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1396 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1397 getShiftExtendAmount() == 0);
1398 }
1399
1400 template<int Width> bool isMemWExtend() const {
1401 if (!isExtend())
1402 return false;
1403 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1404 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1405 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1406 getShiftExtendAmount() == 0);
1407 }
1408
1409 template <unsigned width>
1410 bool isArithmeticShifter() const {
1411 if (!isShifter())
1412 return false;
1413
1414 // An arithmetic shifter is LSL, LSR, or ASR.
1415 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1416 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1417 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1418 }
1419
1420 template <unsigned width>
1421 bool isLogicalShifter() const {
1422 if (!isShifter())
1423 return false;
1424
1425 // A logical shifter is LSL, LSR, ASR or ROR.
1426 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1427 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1428 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1429 getShiftExtendAmount() < width;
1430 }
1431
1432 bool isMovImm32Shifter() const {
1433 if (!isShifter())
1434 return false;
1435
1436 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1437 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1438 if (ST != AArch64_AM::LSL)
1439 return false;
1440 uint64_t Val = getShiftExtendAmount();
1441 return (Val == 0 || Val == 16);
1442 }
1443
1444 bool isMovImm64Shifter() const {
1445 if (!isShifter())
1446 return false;
1447
1448 // A MOVi shifter is LSL of 0 or 16.
1449 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1450 if (ST != AArch64_AM::LSL)
1451 return false;
1452 uint64_t Val = getShiftExtendAmount();
1453 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1454 }
1455
1456 bool isLogicalVecShifter() const {
1457 if (!isShifter())
1458 return false;
1459
1460 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1461 unsigned Shift = getShiftExtendAmount();
1462 return getShiftExtendType() == AArch64_AM::LSL &&
1463 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1464 }
1465
1466 bool isLogicalVecHalfWordShifter() const {
1467 if (!isLogicalVecShifter())
1468 return false;
1469
1470 // A logical vector shifter is a left shift by 0 or 8.
1471 unsigned Shift = getShiftExtendAmount();
1472 return getShiftExtendType() == AArch64_AM::LSL &&
1473 (Shift == 0 || Shift == 8);
1474 }
1475
1476 bool isMoveVecShifter() const {
1477 if (!isShiftExtend())
1478 return false;
1479
1480 // A logical vector shifter is a left shift by 8 or 16.
1481 unsigned Shift = getShiftExtendAmount();
1482 return getShiftExtendType() == AArch64_AM::MSL &&
1483 (Shift == 8 || Shift == 16);
1484 }
1485
1486 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1487 // to LDUR/STUR when the offset is not legal for the former but is for
1488 // the latter. As such, in addition to checking for being a legal unscaled
1489 // address, also check that it is not a legal scaled address. This avoids
1490 // ambiguity in the matcher.
1491 template<int Width>
1492 bool isSImm9OffsetFB() const {
1493 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1494 }
1495
1496 bool isAdrpLabel() const {
1497 // Validation was handled during parsing, so we just sanity check that
1498 // something didn't go haywire.
1499 if (!isImm())
1500 return false;
1501
1502 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1503 int64_t Val = CE->getValue();
1504 int64_t Min = - (4096 * (1LL << (21 - 1)));
1505 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1506 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1507 }
1508
1509 return true;
1510 }
1511
1512 bool isAdrLabel() const {
1513 // Validation was handled during parsing, so we just sanity check that
1514 // something didn't go haywire.
1515 if (!isImm())
1516 return false;
1517
1518 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1519 int64_t Val = CE->getValue();
1520 int64_t Min = - (1LL << (21 - 1));
1521 int64_t Max = ((1LL << (21 - 1)) - 1);
1522 return Val >= Min && Val <= Max;
1523 }
1524
1525 return true;
1526 }
1527
1528 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1529 DiagnosticPredicate isMatrixRegOperand() const {
1530 if (!isMatrix())
1531 return DiagnosticPredicateTy::NoMatch;
1532 if (getMatrixKind() != Kind ||
1533 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1534 EltSize != getMatrixElementWidth())
1535 return DiagnosticPredicateTy::NearMatch;
1536 return DiagnosticPredicateTy::Match;
1537 }
1538
1539 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1540 // Add as immediates when possible. Null MCExpr = 0.
1541 if (!Expr)
1542 Inst.addOperand(MCOperand::createImm(0));
1543 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1544 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1545 else
1546 Inst.addOperand(MCOperand::createExpr(Expr));
1547 }
1548
  // Emit the parsed register as a single register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1553
  // Emit the parsed SME matrix register as a single register operand.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1558
1559 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1560 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1560, __extension__ __PRETTY_FUNCTION__))
;
1561 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1562, __extension__ __PRETTY_FUNCTION__))
1562 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1562, __extension__ __PRETTY_FUNCTION__))
;
1563
1564 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1565 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1566 RI->getEncodingValue(getReg()));
1567
1568 Inst.addOperand(MCOperand::createReg(Reg));
1569 }
1570
1571 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1572 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1572, __extension__ __PRETTY_FUNCTION__))
;
1573 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1574, __extension__ __PRETTY_FUNCTION__))
1574 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1574, __extension__ __PRETTY_FUNCTION__))
;
1575
1576 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1577 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1578 RI->getEncodingValue(getReg()));
1579
1580 Inst.addOperand(MCOperand::createReg(Reg));
1581 }
1582
1583 template <int Width>
1584 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1585 unsigned Base;
1586 switch (Width) {
1587 case 8: Base = AArch64::B0; break;
1588 case 16: Base = AArch64::H0; break;
1589 case 32: Base = AArch64::S0; break;
1590 case 64: Base = AArch64::D0; break;
1591 case 128: Base = AArch64::Q0; break;
1592 default:
1593 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1593)
;
1594 }
1595 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1596 }
1597
  // Emit the D register aliasing the parsed Q register (vectors are parsed
  // as 128-bit registers; 64-bit instructions need the D alias).
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1604
  // Emit the parsed 128-bit vector register unchanged.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1611
  // Emit the parsed register for the "lo" (V0-V15) vector register class;
  // range checking happened during matching.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1616
  // Row selector for the FirstRegs table in addVectorListOperands: which
  // register bank a vector-list operand is emitted in.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };
1622
  // Emit a vector-list operand (e.g. "{ v0.8b, v1.8b }") as the pseudo
  // register for a consecutive tuple of NumRegs registers starting at the
  // parsed start register.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Column [NumRegs] is the tuple register starting at index 0 for each
    // bank; column [0] is the bank the *parsed* start register lives in
    // (vector lists are parsed as Q registers, SVE lists as Z registers).
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    // Offset of the parsed start register within its bank, rebased onto the
    // first tuple register of the requested length.
    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1645
  // Emit a vector lane index (e.g. the "[1]" in "v0.s[1]") as an immediate.
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1650
  // Emit a two-valued exact FP immediate (e.g. #0.5 / #1.0) as a 0/1
  // immediate selecting which of the two allowed constants was written.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    // 1 if the operand matches the second constant, 0 for the first.
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1657
  // Emit the immediate, folding to a constant operand when possible.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1665
1666 template <int Shift>
1667 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1668 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1668, __extension__ __PRETTY_FUNCTION__))
;
1669 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1670 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1671 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1672 } else if (isShiftedImm()) {
1673 addExpr(Inst, getShiftedImmVal());
1674 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1675 } else {
1676 addExpr(Inst, getImm());
1677 Inst.addOperand(MCOperand::createImm(0));
1678 }
1679 }
1680
  // Emit the negation of a shifted immediate as (value, shift); used by
  // aliases such as SUB = ADD of a negated immediate. Matching guarantees
  // the decomposition exists.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1690
  // Emit the parsed condition code (EQ, NE, ...) as an immediate.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }
1695
1696 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1697 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1697, __extension__ __PRETTY_FUNCTION__))
;
1698 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1699 if (!MCE)
1700 addExpr(Inst, getImm());
1701 else
1702 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1703 }
1704
  // ADR targets are byte offsets, so no scaling is needed.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1708
1709 template<int Scale>
1710 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1711 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1711, __extension__ __PRETTY_FUNCTION__))
;
1712 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1713
1714 if (!MCE) {
1715 Inst.addOperand(MCOperand::createExpr(getImm()));
1716 return;
1717 }
1718 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1719 }
1720
  // Emit a uimm6 immediate; matching guarantees it is a constant.
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }
1726
  // Emit a constant immediate divided by Scale (the instruction encodes the
  // value in units of Scale); matching guarantees it is a constant.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1733
  // Emit a logical (bitmask) immediate in its encoded N:immr:imms form;
  // T selects the 32- or 64-bit encoding width.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1742
  // Like addLogicalImmOperands, but encodes the bitwise NOT of the written
  // value; used by aliases such as BIC = AND with inverted immediate.
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1751
  // Emit an AdvSIMD modified-immediate (type 10, the 64-bit per-byte mask
  // form used by MOVI.2d) in its encoded 8-bit form.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1758
1759 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1760 // Branch operands don't encode the low bits, so shift them off
1761 // here. If it's a label, however, just put it on directly as there's
1762 // not enough information now to do anything.
1763 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1763, __extension__ __PRETTY_FUNCTION__))
;
1764 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1765 if (!MCE) {
1766 addExpr(Inst, getImm());
1767 return;
1768 }
1769 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1769, __extension__ __PRETTY_FUNCTION__))
;
1770 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1771 }
1772
1773 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1774 // Branch operands don't encode the low bits, so shift them off
1775 // here. If it's a label, however, just put it on directly as there's
1776 // not enough information now to do anything.
1777 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1777, __extension__ __PRETTY_FUNCTION__))
;
1778 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1779 if (!MCE) {
1780 addExpr(Inst, getImm());
1781 return;
1782 }
1783 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1783, __extension__ __PRETTY_FUNCTION__))
;
1784 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1785 }
1786
1787 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1788 // Branch operands don't encode the low bits, so shift them off
1789 // here. If it's a label, however, just put it on directly as there's
1790 // not enough information now to do anything.
1791 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1791, __extension__ __PRETTY_FUNCTION__))
;
1792 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1793 if (!MCE) {
1794 addExpr(Inst, getImm());
1795 return;
1796 }
1797 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1797, __extension__ __PRETTY_FUNCTION__))
;
1798 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1799 }
1800
  // Emit an FP immediate in its 8-bit FMOV encoding.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
1806
  // Emit the parsed barrier option (e.g. "sy", "ish") as an immediate.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
1811
  // Emit the parsed nXS barrier option (FEAT_XS DSB variants) as an
  // immediate.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
1816
  // Emit the system register's MRS (read) encoding as an immediate.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
1822
  // Emit the system register's MSR (write) encoding as an immediate.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
1828
  // Emit the PSTATE field encoding for MSR (immediate) forms taking a 0/1
  // operand.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1834
  // Emit the SME SVCR (streaming vector control register) field encoding.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
  }
1840
  // Emit the PSTATE field encoding for MSR (immediate) forms taking a 0-15
  // operand.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1846
  // Emit the parsed system control register field ("Cn"/"Cm") as an
  // immediate.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
1851
  // Emit the parsed prefetch operation (e.g. "pldl1keep") as an immediate.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
1856
  // Emit the parsed PSB hint (statistical profiling barrier) as an
  // immediate.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1861
  // Emit the parsed BTI (branch target identification) hint as an immediate.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1866
  // Emit a register-shift modifier as a single packed (type, amount)
  // immediate.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1873
  // Emit an arithmetic extend modifier as a packed immediate. For 32-bit
  // GPRs a plain "lsl" is canonicalized to UXTW before encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1881
1882 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1883 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1883, __extension__ __PRETTY_FUNCTION__))
;
1884 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1885 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1886 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1887 Inst.addOperand(MCOperand::createImm(Imm));
1888 }
1889
1890 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1891 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1891, __extension__ __PRETTY_FUNCTION__))
;
1892 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1893 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1894 Inst.addOperand(MCOperand::createImm(IsSigned));
1895 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1896 }
1897
1898 // For 8-bit load/store instructions with a register offset, both the
1899 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1900 // they're disambiguated by whether the shift was explicit or implicit rather
1901 // than its size.
1902 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1903 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1903, __extension__ __PRETTY_FUNCTION__))
;
1904 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1905 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1906 Inst.addOperand(MCOperand::createImm(IsSigned));
1907 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1908 }
1909
1910 template<int Shift>
1911 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1912 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1912, __extension__ __PRETTY_FUNCTION__))
;
1913
1914 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1915 if (CE) {
1916 uint64_t Value = CE->getValue();
1917 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1918 } else {
1919 addExpr(Inst, getImm());
1920 }
1921 }
1922
1923 template<int Shift>
1924 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1925 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1925, __extension__ __PRETTY_FUNCTION__))
;
1926
1927 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1928 uint64_t Value = CE->getValue();
1929 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1930 }
1931
1932 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1933 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1933, __extension__ __PRETTY_FUNCTION__))
;
1934 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1935 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1936 }
1937
1938 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1939 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1939, __extension__ __PRETTY_FUNCTION__))
;
1940 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1941 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1942 }
1943
1944 void print(raw_ostream &OS) const override;
1945
1946 static std::unique_ptr<AArch64Operand>
1947 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1948 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1949 Op->Tok.Data = Str.data();
1950 Op->Tok.Length = Str.size();
1951 Op->Tok.IsSuffix = IsSuffix;
1952 Op->StartLoc = S;
1953 Op->EndLoc = S;
1954 return Op;
1955 }
1956
1957 static std::unique_ptr<AArch64Operand>
1958 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1959 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1960 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1961 unsigned ShiftAmount = 0,
1962 unsigned HasExplicitAmount = false) {
1963 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1964 Op->Reg.RegNum = RegNum;
1965 Op->Reg.Kind = Kind;
1966 Op->Reg.ElementWidth = 0;
1967 Op->Reg.EqualityTy = EqTy;
1968 Op->Reg.ShiftExtend.Type = ExtTy;
1969 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1970 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1971 Op->StartLoc = S;
1972 Op->EndLoc = E;
1973 return Op;
1974 }
1975
1976 static std::unique_ptr<AArch64Operand>
1977 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1978 SMLoc S, SMLoc E, MCContext &Ctx,
1979 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1980 unsigned ShiftAmount = 0,
1981 unsigned HasExplicitAmount = false) {
1982 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1984, __extension__ __PRETTY_FUNCTION__))
1983 Kind == RegKind::SVEPredicateVector) &&(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1984, __extension__ __PRETTY_FUNCTION__))
1984 "Invalid vector kind")(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1984, __extension__ __PRETTY_FUNCTION__))
;
1985 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1986 HasExplicitAmount);
1987 Op->Reg.ElementWidth = ElementWidth;
1988 return Op;
1989 }
1990
1991 static std::unique_ptr<AArch64Operand>
1992 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1993 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1994 MCContext &Ctx) {
1995 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1996 Op->VectorList.RegNum = RegNum;
1997 Op->VectorList.Count = Count;
1998 Op->VectorList.NumElements = NumElements;
1999 Op->VectorList.ElementWidth = ElementWidth;
2000 Op->VectorList.RegisterKind = RegisterKind;
2001 Op->StartLoc = S;
2002 Op->EndLoc = E;
2003 return Op;
2004 }
2005
2006 static std::unique_ptr<AArch64Operand>
2007 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2008 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2009 Op->VectorIndex.Val = Idx;
2010 Op->StartLoc = S;
2011 Op->EndLoc = E;
2012 return Op;
2013 }
2014
2015 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2016 SMLoc E, MCContext &Ctx) {
2017 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2018 Op->Imm.Val = Val;
2019 Op->StartLoc = S;
2020 Op->EndLoc = E;
2021 return Op;
2022 }
2023
2024 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2025 unsigned ShiftAmount,
2026 SMLoc S, SMLoc E,
2027 MCContext &Ctx) {
2028 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2029 Op->ShiftedImm .Val = Val;
2030 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2031 Op->StartLoc = S;
2032 Op->EndLoc = E;
2033 return Op;
2034 }
2035
2036 static std::unique_ptr<AArch64Operand>
2037 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2038 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2039 Op->CondCode.Code = Code;
2040 Op->StartLoc = S;
2041 Op->EndLoc = E;
2042 return Op;
2043 }
2044
2045 static std::unique_ptr<AArch64Operand>
2046 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2047 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2048 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2049 Op->FPImm.IsExact = IsExact;
2050 Op->StartLoc = S;
2051 Op->EndLoc = S;
2052 return Op;
2053 }
2054
2055 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2056 StringRef Str,
2057 SMLoc S,
2058 MCContext &Ctx,
2059 bool HasnXSModifier) {
2060 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2061 Op->Barrier.Val = Val;
2062 Op->Barrier.Data = Str.data();
2063 Op->Barrier.Length = Str.size();
2064 Op->Barrier.HasnXSModifier = HasnXSModifier;
2065 Op->StartLoc = S;
2066 Op->EndLoc = S;
2067 return Op;
2068 }
2069
2070 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2071 uint32_t MRSReg,
2072 uint32_t MSRReg,
2073 uint32_t PStateField,
2074 MCContext &Ctx) {
2075 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2076 Op->SysReg.Data = Str.data();
2077 Op->SysReg.Length = Str.size();
2078 Op->SysReg.MRSReg = MRSReg;
2079 Op->SysReg.MSRReg = MSRReg;
2080 Op->SysReg.PStateField = PStateField;
2081 Op->StartLoc = S;
2082 Op->EndLoc = S;
2083 return Op;
2084 }
2085
2086 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2087 SMLoc E, MCContext &Ctx) {
2088 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2089 Op->SysCRImm.Val = Val;
2090 Op->StartLoc = S;
2091 Op->EndLoc = E;
2092 return Op;
2093 }
2094
2095 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2096 StringRef Str,
2097 SMLoc S,
2098 MCContext &Ctx) {
2099 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2100 Op->Prefetch.Val = Val;
2101 Op->Barrier.Data = Str.data();
2102 Op->Barrier.Length = Str.size();
2103 Op->StartLoc = S;
2104 Op->EndLoc = S;
2105 return Op;
2106 }
2107
2108 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2109 StringRef Str,
2110 SMLoc S,
2111 MCContext &Ctx) {
2112 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2113 Op->PSBHint.Val = Val;
2114 Op->PSBHint.Data = Str.data();
2115 Op->PSBHint.Length = Str.size();
2116 Op->StartLoc = S;
2117 Op->EndLoc = S;
2118 return Op;
2119 }
2120
2121 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2122 StringRef Str,
2123 SMLoc S,
2124 MCContext &Ctx) {
2125 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2126 Op->BTIHint.Val = Val | 32;
2127 Op->BTIHint.Data = Str.data();
2128 Op->BTIHint.Length = Str.size();
2129 Op->StartLoc = S;
2130 Op->EndLoc = S;
2131 return Op;
2132 }
2133
2134 static std::unique_ptr<AArch64Operand>
2135 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2136 SMLoc S, SMLoc E, MCContext &Ctx) {
2137 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2138 Op->MatrixReg.RegNum = RegNum;
2139 Op->MatrixReg.ElementWidth = ElementWidth;
2140 Op->MatrixReg.Kind = Kind;
2141 Op->StartLoc = S;
2142 Op->EndLoc = E;
2143 return Op;
2144 }
2145
2146 static std::unique_ptr<AArch64Operand>
2147 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2148 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2149 Op->SVCR.PStateField = PStateField;
2150 Op->SVCR.Data = Str.data();
2151 Op->SVCR.Length = Str.size();
2152 Op->StartLoc = S;
2153 Op->EndLoc = S;
2154 return Op;
2155 }
2156
2157 static std::unique_ptr<AArch64Operand>
2158 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2159 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2160 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2161 Op->ShiftExtend.Type = ShOp;
2162 Op->ShiftExtend.Amount = Val;
2163 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2164 Op->StartLoc = S;
2165 Op->EndLoc = E;
2166 return Op;
2167 }
2168};
2169
2170} // end anonymous namespace.
2171
2172void AArch64Operand::print(raw_ostream &OS) const {
2173 switch (Kind) {
2174 case k_FPImm:
2175 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2176 if (!getFPImmIsExact())
2177 OS << " (inexact)";
2178 OS << ">";
2179 break;
2180 case k_Barrier: {
2181 StringRef Name = getBarrierName();
2182 if (!Name.empty())
2183 OS << "<barrier " << Name << ">";
2184 else
2185 OS << "<barrier invalid #" << getBarrier() << ">";
2186 break;
2187 }
2188 case k_Immediate:
2189 OS << *getImm();
2190 break;
2191 case k_ShiftedImm: {
2192 unsigned Shift = getShiftedImmShift();
2193 OS << "<shiftedimm ";
2194 OS << *getShiftedImmVal();
2195 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2196 break;
2197 }
2198 case k_CondCode:
2199 OS << "<condcode " << getCondCode() << ">";
2200 break;
2201 case k_VectorList: {
2202 OS << "<vectorlist ";
2203 unsigned Reg = getVectorListStart();
2204 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2205 OS << Reg + i << " ";
2206 OS << ">";
2207 break;
2208 }
2209 case k_VectorIndex:
2210 OS << "<vectorindex " << getVectorIndex() << ">";
2211 break;
2212 case k_SysReg:
2213 OS << "<sysreg: " << getSysReg() << '>';
2214 break;
2215 case k_Token:
2216 OS << "'" << getToken() << "'";
2217 break;
2218 case k_SysCR:
2219 OS << "c" << getSysCR();
2220 break;
2221 case k_Prefetch: {
2222 StringRef Name = getPrefetchName();
2223 if (!Name.empty())
2224 OS << "<prfop " << Name << ">";
2225 else
2226 OS << "<prfop invalid #" << getPrefetch() << ">";
2227 break;
2228 }
2229 case k_PSBHint:
2230 OS << getPSBHintName();
2231 break;
2232 case k_BTIHint:
2233 OS << getBTIHintName();
2234 break;
2235 case k_MatrixRegister:
2236 OS << "<matrix " << getMatrixReg() << ">";
2237 break;
2238 case k_SVCR: {
2239 OS << getSVCR();
2240 break;
2241 }
2242 case k_Register:
2243 OS << "<register " << getReg() << ">";
2244 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2245 break;
2246 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2247 case k_ShiftExtend:
2248 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2249 << getShiftExtendAmount();
2250 if (!hasShiftExtendAmount())
2251 OS << "<imp>";
2252 OS << '>';
2253 break;
2254 }
2255}
2256
2257/// @name Auto-generated Match Functions
2258/// {
2259
2260static unsigned MatchRegisterName(StringRef Name);
2261
2262/// }
2263
2264static unsigned MatchNeonVectorRegName(StringRef Name) {
2265 return StringSwitch<unsigned>(Name.lower())
2266 .Case("v0", AArch64::Q0)
2267 .Case("v1", AArch64::Q1)
2268 .Case("v2", AArch64::Q2)
2269 .Case("v3", AArch64::Q3)
2270 .Case("v4", AArch64::Q4)
2271 .Case("v5", AArch64::Q5)
2272 .Case("v6", AArch64::Q6)
2273 .Case("v7", AArch64::Q7)
2274 .Case("v8", AArch64::Q8)
2275 .Case("v9", AArch64::Q9)
2276 .Case("v10", AArch64::Q10)
2277 .Case("v11", AArch64::Q11)
2278 .Case("v12", AArch64::Q12)
2279 .Case("v13", AArch64::Q13)
2280 .Case("v14", AArch64::Q14)
2281 .Case("v15", AArch64::Q15)
2282 .Case("v16", AArch64::Q16)
2283 .Case("v17", AArch64::Q17)
2284 .Case("v18", AArch64::Q18)
2285 .Case("v19", AArch64::Q19)
2286 .Case("v20", AArch64::Q20)
2287 .Case("v21", AArch64::Q21)
2288 .Case("v22", AArch64::Q22)
2289 .Case("v23", AArch64::Q23)
2290 .Case("v24", AArch64::Q24)
2291 .Case("v25", AArch64::Q25)
2292 .Case("v26", AArch64::Q26)
2293 .Case("v27", AArch64::Q27)
2294 .Case("v28", AArch64::Q28)
2295 .Case("v29", AArch64::Q29)
2296 .Case("v30", AArch64::Q30)
2297 .Case("v31", AArch64::Q31)
2298 .Default(0);
2299}
2300
2301/// Returns an optional pair of (#elements, element-width) if Suffix
2302/// is a valid vector kind. Where the number of elements in a vector
2303/// or the vector width is implicit or explicitly unknown (but still a
2304/// valid suffix kind), 0 is used.
2305static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2306 RegKind VectorKind) {
2307 std::pair<int, int> Res = {-1, -1};
2308
2309 switch (VectorKind) {
2310 case RegKind::NeonVector:
2311 Res =
2312 StringSwitch<std::pair<int, int>>(Suffix.lower())
2313 .Case("", {0, 0})
2314 .Case(".1d", {1, 64})
2315 .Case(".1q", {1, 128})
2316 // '.2h' needed for fp16 scalar pairwise reductions
2317 .Case(".2h", {2, 16})
2318 .Case(".2s", {2, 32})
2319 .Case(".2d", {2, 64})
2320 // '.4b' is another special case for the ARMv8.2a dot product
2321 // operand
2322 .Case(".4b", {4, 8})
2323 .Case(".4h", {4, 16})
2324 .Case(".4s", {4, 32})
2325 .Case(".8b", {8, 8})
2326 .Case(".8h", {8, 16})
2327 .Case(".16b", {16, 8})
2328 // Accept the width neutral ones, too, for verbose syntax. If those
2329 // aren't used in the right places, the token operand won't match so
2330 // all will work out.
2331 .Case(".b", {0, 8})
2332 .Case(".h", {0, 16})
2333 .Case(".s", {0, 32})
2334 .Case(".d", {0, 64})
2335 .Default({-1, -1});
2336 break;
2337 case RegKind::SVEPredicateVector:
2338 case RegKind::SVEDataVector:
2339 case RegKind::Matrix:
2340 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2341 .Case("", {0, 0})
2342 .Case(".b", {0, 8})
2343 .Case(".h", {0, 16})
2344 .Case(".s", {0, 32})
2345 .Case(".d", {0, 64})
2346 .Case(".q", {0, 128})
2347 .Default({-1, -1});
2348 break;
2349 default:
2350 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2350)
;
2351 }
2352
2353 if (Res == std::make_pair(-1, -1))
2354 return Optional<std::pair<int, int>>();
2355
2356 return Optional<std::pair<int, int>>(Res);
2357}
2358
2359static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2360 return parseVectorKind(Suffix, VectorKind).hasValue();
2361}
2362
2363static unsigned matchSVEDataVectorRegName(StringRef Name) {
2364 return StringSwitch<unsigned>(Name.lower())
2365 .Case("z0", AArch64::Z0)
2366 .Case("z1", AArch64::Z1)
2367 .Case("z2", AArch64::Z2)
2368 .Case("z3", AArch64::Z3)
2369 .Case("z4", AArch64::Z4)
2370 .Case("z5", AArch64::Z5)
2371 .Case("z6", AArch64::Z6)
2372 .Case("z7", AArch64::Z7)
2373 .Case("z8", AArch64::Z8)
2374 .Case("z9", AArch64::Z9)
2375 .Case("z10", AArch64::Z10)
2376 .Case("z11", AArch64::Z11)
2377 .Case("z12", AArch64::Z12)
2378 .Case("z13", AArch64::Z13)
2379 .Case("z14", AArch64::Z14)
2380 .Case("z15", AArch64::Z15)
2381 .Case("z16", AArch64::Z16)
2382 .Case("z17", AArch64::Z17)
2383 .Case("z18", AArch64::Z18)
2384 .Case("z19", AArch64::Z19)
2385 .Case("z20", AArch64::Z20)
2386 .Case("z21", AArch64::Z21)
2387 .Case("z22", AArch64::Z22)
2388 .Case("z23", AArch64::Z23)
2389 .Case("z24", AArch64::Z24)
2390 .Case("z25", AArch64::Z25)
2391 .Case("z26", AArch64::Z26)
2392 .Case("z27", AArch64::Z27)
2393 .Case("z28", AArch64::Z28)
2394 .Case("z29", AArch64::Z29)
2395 .Case("z30", AArch64::Z30)
2396 .Case("z31", AArch64::Z31)
2397 .Default(0);
2398}
2399
2400static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2401 return StringSwitch<unsigned>(Name.lower())
2402 .Case("p0", AArch64::P0)
2403 .Case("p1", AArch64::P1)
2404 .Case("p2", AArch64::P2)
2405 .Case("p3", AArch64::P3)
2406 .Case("p4", AArch64::P4)
2407 .Case("p5", AArch64::P5)
2408 .Case("p6", AArch64::P6)
2409 .Case("p7", AArch64::P7)
2410 .Case("p8", AArch64::P8)
2411 .Case("p9", AArch64::P9)
2412 .Case("p10", AArch64::P10)
2413 .Case("p11", AArch64::P11)
2414 .Case("p12", AArch64::P12)
2415 .Case("p13", AArch64::P13)
2416 .Case("p14", AArch64::P14)
2417 .Case("p15", AArch64::P15)
2418 .Default(0);
2419}
2420
2421static unsigned matchMatrixRegName(StringRef Name) {
2422 return StringSwitch<unsigned>(Name.lower())
2423 .Case("za", AArch64::ZA)
2424 .Case("za0.q", AArch64::ZAQ0)
2425 .Case("za1.q", AArch64::ZAQ1)
2426 .Case("za2.q", AArch64::ZAQ2)
2427 .Case("za3.q", AArch64::ZAQ3)
2428 .Case("za4.q", AArch64::ZAQ4)
2429 .Case("za5.q", AArch64::ZAQ5)
2430 .Case("za6.q", AArch64::ZAQ6)
2431 .Case("za7.q", AArch64::ZAQ7)
2432 .Case("za8.q", AArch64::ZAQ8)
2433 .Case("za9.q", AArch64::ZAQ9)
2434 .Case("za10.q", AArch64::ZAQ10)
2435 .Case("za11.q", AArch64::ZAQ11)
2436 .Case("za12.q", AArch64::ZAQ12)
2437 .Case("za13.q", AArch64::ZAQ13)
2438 .Case("za14.q", AArch64::ZAQ14)
2439 .Case("za15.q", AArch64::ZAQ15)
2440 .Case("za0.d", AArch64::ZAD0)
2441 .Case("za1.d", AArch64::ZAD1)
2442 .Case("za2.d", AArch64::ZAD2)
2443 .Case("za3.d", AArch64::ZAD3)
2444 .Case("za4.d", AArch64::ZAD4)
2445 .Case("za5.d", AArch64::ZAD5)
2446 .Case("za6.d", AArch64::ZAD6)
2447 .Case("za7.d", AArch64::ZAD7)
2448 .Case("za0.s", AArch64::ZAS0)
2449 .Case("za1.s", AArch64::ZAS1)
2450 .Case("za2.s", AArch64::ZAS2)
2451 .Case("za3.s", AArch64::ZAS3)
2452 .Case("za0.h", AArch64::ZAH0)
2453 .Case("za1.h", AArch64::ZAH1)
2454 .Case("za0.b", AArch64::ZAB0)
2455 .Case("za0h.q", AArch64::ZAQ0)
2456 .Case("za1h.q", AArch64::ZAQ1)
2457 .Case("za2h.q", AArch64::ZAQ2)
2458 .Case("za3h.q", AArch64::ZAQ3)
2459 .Case("za4h.q", AArch64::ZAQ4)
2460 .Case("za5h.q", AArch64::ZAQ5)
2461 .Case("za6h.q", AArch64::ZAQ6)
2462 .Case("za7h.q", AArch64::ZAQ7)
2463 .Case("za8h.q", AArch64::ZAQ8)
2464 .Case("za9h.q", AArch64::ZAQ9)
2465 .Case("za10h.q", AArch64::ZAQ10)
2466 .Case("za11h.q", AArch64::ZAQ11)
2467 .Case("za12h.q", AArch64::ZAQ12)
2468 .Case("za13h.q", AArch64::ZAQ13)
2469 .Case("za14h.q", AArch64::ZAQ14)
2470 .Case("za15h.q", AArch64::ZAQ15)
2471 .Case("za0h.d", AArch64::ZAD0)
2472 .Case("za1h.d", AArch64::ZAD1)
2473 .Case("za2h.d", AArch64::ZAD2)
2474 .Case("za3h.d", AArch64::ZAD3)
2475 .Case("za4h.d", AArch64::ZAD4)
2476 .Case("za5h.d", AArch64::ZAD5)
2477 .Case("za6h.d", AArch64::ZAD6)
2478 .Case("za7h.d", AArch64::ZAD7)
2479 .Case("za0h.s", AArch64::ZAS0)
2480 .Case("za1h.s", AArch64::ZAS1)
2481 .Case("za2h.s", AArch64::ZAS2)
2482 .Case("za3h.s", AArch64::ZAS3)
2483 .Case("za0h.h", AArch64::ZAH0)
2484 .Case("za1h.h", AArch64::ZAH1)
2485 .Case("za0h.b", AArch64::ZAB0)
2486 .Case("za0v.q", AArch64::ZAQ0)
2487 .Case("za1v.q", AArch64::ZAQ1)
2488 .Case("za2v.q", AArch64::ZAQ2)
2489 .Case("za3v.q", AArch64::ZAQ3)
2490 .Case("za4v.q", AArch64::ZAQ4)
2491 .Case("za5v.q", AArch64::ZAQ5)
2492 .Case("za6v.q", AArch64::ZAQ6)
2493 .Case("za7v.q", AArch64::ZAQ7)
2494 .Case("za8v.q", AArch64::ZAQ8)
2495 .Case("za9v.q", AArch64::ZAQ9)
2496 .Case("za10v.q", AArch64::ZAQ10)
2497 .Case("za11v.q", AArch64::ZAQ11)
2498 .Case("za12v.q", AArch64::ZAQ12)
2499 .Case("za13v.q", AArch64::ZAQ13)
2500 .Case("za14v.q", AArch64::ZAQ14)
2501 .Case("za15v.q", AArch64::ZAQ15)
2502 .Case("za0v.d", AArch64::ZAD0)
2503 .Case("za1v.d", AArch64::ZAD1)
2504 .Case("za2v.d", AArch64::ZAD2)
2505 .Case("za3v.d", AArch64::ZAD3)
2506 .Case("za4v.d", AArch64::ZAD4)
2507 .Case("za5v.d", AArch64::ZAD5)
2508 .Case("za6v.d", AArch64::ZAD6)
2509 .Case("za7v.d", AArch64::ZAD7)
2510 .Case("za0v.s", AArch64::ZAS0)
2511 .Case("za1v.s", AArch64::ZAS1)
2512 .Case("za2v.s", AArch64::ZAS2)
2513 .Case("za3v.s", AArch64::ZAS3)
2514 .Case("za0v.h", AArch64::ZAH0)
2515 .Case("za1v.h", AArch64::ZAH1)
2516 .Case("za0v.b", AArch64::ZAB0)
2517 .Default(0);
2518}
2519
2520bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2521 SMLoc &EndLoc) {
2522 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
27
Calling 'AArch64AsmParser::tryParseRegister'
33
Returning from 'AArch64AsmParser::tryParseRegister'
34
Returning without writing to 'RegNo'
2523}
2524
2525OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2526 SMLoc &StartLoc,
2527 SMLoc &EndLoc) {
2528 StartLoc = getLoc();
2529 auto Res = tryParseScalarRegister(RegNo);
28
Calling 'AArch64AsmParser::tryParseScalarRegister'
31
Returning from 'AArch64AsmParser::tryParseScalarRegister'
2530 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2531 return Res;
32
Returning without writing to 'RegNo'
2532}
2533
2534// Matches a register name or register alias previously defined by '.req'
2535unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2536 RegKind Kind) {
2537 unsigned RegNum = 0;
2538 if ((RegNum = matchSVEDataVectorRegName(Name)))
2539 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2540
2541 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2542 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2543
2544 if ((RegNum = MatchNeonVectorRegName(Name)))
2545 return Kind == RegKind::NeonVector ? RegNum : 0;
2546
2547 if ((RegNum = matchMatrixRegName(Name)))
2548 return Kind == RegKind::Matrix ? RegNum : 0;
2549
2550 // The parsed register must be of RegKind Scalar
2551 if ((RegNum = MatchRegisterName(Name)))
2552 return Kind == RegKind::Scalar ? RegNum : 0;
2553
2554 if (!RegNum) {
2555 // Handle a few common aliases of registers.
2556 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2557 .Case("fp", AArch64::FP)
2558 .Case("lr", AArch64::LR)
2559 .Case("x31", AArch64::XZR)
2560 .Case("w31", AArch64::WZR)
2561 .Default(0))
2562 return Kind == RegKind::Scalar ? RegNum : 0;
2563
2564 // Check for aliases registered via .req. Canonicalize to lower case.
2565 // That's more consistent since register names are case insensitive, and
2566 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2567 auto Entry = RegisterReqs.find(Name.lower());
2568 if (Entry == RegisterReqs.end())
2569 return 0;
2570
2571 // set RegNum if the match is the right kind of register
2572 if (Kind == Entry->getValue().first)
2573 RegNum = Entry->getValue().second;
2574 }
2575 return RegNum;
2576}
2577
2578/// tryParseScalarRegister - Try to parse a register name. The token must be an
2579/// Identifier when called, and if it is a register name the token is eaten and
2580/// the register is added to the operand list.
2581OperandMatchResultTy
2582AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2583 MCAsmParser &Parser = getParser();
2584 const AsmToken &Tok = Parser.getTok();
2585 if (Tok.isNot(AsmToken::Identifier))
29
Taking true branch
2586 return MatchOperand_NoMatch;
30
Returning without writing to 'RegNum'
2587
2588 std::string lowerCase = Tok.getString().lower();
2589 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2590 if (Reg == 0)
2591 return MatchOperand_NoMatch;
2592
2593 RegNum = Reg;
2594 Parser.Lex(); // Eat identifier token.
2595 return MatchOperand_Success;
2596}
2597
2598/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2599OperandMatchResultTy
2600AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2601 MCAsmParser &Parser = getParser();
2602 SMLoc S = getLoc();
2603
2604 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2605 Error(S, "Expected cN operand where 0 <= N <= 15");
2606 return MatchOperand_ParseFail;
2607 }
2608
2609 StringRef Tok = Parser.getTok().getIdentifier();
2610 if (Tok[0] != 'c' && Tok[0] != 'C') {
2611 Error(S, "Expected cN operand where 0 <= N <= 15");
2612 return MatchOperand_ParseFail;
2613 }
2614
2615 uint32_t CRNum;
2616 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2617 if (BadNum || CRNum > 15) {
2618 Error(S, "Expected cN operand where 0 <= N <= 15");
2619 return MatchOperand_ParseFail;
2620 }
2621
2622 Parser.Lex(); // Eat identifier token.
2623 Operands.push_back(
2624 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2625 return MatchOperand_Success;
2626}
2627
2628/// tryParsePrefetch - Try to parse a prefetch operand.
2629template <bool IsSVEPrefetch>
2630OperandMatchResultTy
2631AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2632 MCAsmParser &Parser = getParser();
2633 SMLoc S = getLoc();
2634 const AsmToken &Tok = Parser.getTok();
2635
2636 auto LookupByName = [](StringRef N) {
2637 if (IsSVEPrefetch) {
2638 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2639 return Optional<unsigned>(Res->Encoding);
2640 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2641 return Optional<unsigned>(Res->Encoding);
2642 return Optional<unsigned>();
2643 };
2644
2645 auto LookupByEncoding = [](unsigned E) {
2646 if (IsSVEPrefetch) {
2647 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2648 return Optional<StringRef>(Res->Name);
2649 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2650 return Optional<StringRef>(Res->Name);
2651 return Optional<StringRef>();
2652 };
2653 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2654
2655 // Either an identifier for named values or a 5-bit immediate.
2656 // Eat optional hash.
2657 if (parseOptionalToken(AsmToken::Hash) ||
2658 Tok.is(AsmToken::Integer)) {
2659 const MCExpr *ImmVal;
2660 if (getParser().parseExpression(ImmVal))
2661 return MatchOperand_ParseFail;
2662
2663 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2664 if (!MCE) {
2665 TokError("immediate value expected for prefetch operand");
2666 return MatchOperand_ParseFail;
2667 }
2668 unsigned prfop = MCE->getValue();
2669 if (prfop > MaxVal) {
2670 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2671 "] expected");
2672 return MatchOperand_ParseFail;
2673 }
2674
2675 auto PRFM = LookupByEncoding(MCE->getValue());
2676 Operands.push_back(AArch64Operand::CreatePrefetch(
2677 prfop, PRFM.getValueOr(""), S, getContext()));
2678 return MatchOperand_Success;
2679 }
2680
2681 if (Tok.isNot(AsmToken::Identifier)) {
2682 TokError("prefetch hint expected");
2683 return MatchOperand_ParseFail;
2684 }
2685
2686 auto PRFM = LookupByName(Tok.getString());
2687 if (!PRFM) {
2688 TokError("prefetch hint expected");
2689 return MatchOperand_ParseFail;
2690 }
2691
2692 Operands.push_back(AArch64Operand::CreatePrefetch(
2693 *PRFM, Tok.getString(), S, getContext()));
2694 Parser.Lex(); // Eat identifier token.
2695 return MatchOperand_Success;
2696}
2697
2698/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2699OperandMatchResultTy
2700AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2701 MCAsmParser &Parser = getParser();
2702 SMLoc S = getLoc();
2703 const AsmToken &Tok = Parser.getTok();
2704 if (Tok.isNot(AsmToken::Identifier)) {
2705 TokError("invalid operand for instruction");
2706 return MatchOperand_ParseFail;
2707 }
2708
2709 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2710 if (!PSB) {
2711 TokError("invalid operand for instruction");
2712 return MatchOperand_ParseFail;
2713 }
2714
2715 Operands.push_back(AArch64Operand::CreatePSBHint(
2716 PSB->Encoding, Tok.getString(), S, getContext()));
2717 Parser.Lex(); // Eat identifier token.
2718 return MatchOperand_Success;
2719}
2720
2721/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2722OperandMatchResultTy
2723AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2724 MCAsmParser &Parser = getParser();
2725 SMLoc S = getLoc();
2726 const AsmToken &Tok = Parser.getTok();
2727 if (Tok.isNot(AsmToken::Identifier)) {
2728 TokError("invalid operand for instruction");
2729 return MatchOperand_ParseFail;
2730 }
2731
2732 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2733 if (!BTI) {
2734 TokError("invalid operand for instruction");
2735 return MatchOperand_ParseFail;
2736 }
2737
2738 Operands.push_back(AArch64Operand::CreateBTIHint(
2739 BTI->Encoding, Tok.getString(), S, getContext()));
2740 Parser.Lex(); // Eat identifier token.
2741 return MatchOperand_Success;
2742}
2743
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An immediate marker '#' before the label expression is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify any :modifier: / @modifier on the symbol so we can validate that
  // it is one of the page-granular relocations ADRP supports.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references cannot carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2796
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getParser().getTok().is(AsmToken::LBrac))
    return MatchOperand_NoMatch;

  // The '#' immediate marker is optional.
  if (getParser().getTok().is(AsmToken::Hash))
    getParser().Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Unlike ADRP, ADR accepts no @page/@got-style modifiers at all; only an
  // unqualified symbol reference is valid.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      Error(S, "unexpected adr label");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
}
2833
/// tryParseFPImm - A floating point immediate expression operand.
/// When \p AddFPZeroAsLiteral is set, a positive zero is pushed as the two
/// literal tokens "#0" and ".0" instead of an FP-immediate operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this simply isn't an FP immediate; with one it is
    // a malformed operand.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  // A "0x" integer is the raw 8-bit encoded FP immediate, not a value, so it
  // must fit in a byte and cannot be negated.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      // The bool records whether the textual value converted exactly (opOK).
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2889
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No trailing comma: a plain immediate with no shift suffix.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_insensitive("lsl")) {
    Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return MatchOperand_Success;
}
2951
2952/// parseCondCodeString - Parse a Condition Code string.
2953AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2954 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2955 .Case("eq", AArch64CC::EQ)
2956 .Case("ne", AArch64CC::NE)
2957 .Case("cs", AArch64CC::HS)
2958 .Case("hs", AArch64CC::HS)
2959 .Case("cc", AArch64CC::LO)
2960 .Case("lo", AArch64CC::LO)
2961 .Case("mi", AArch64CC::MI)
2962 .Case("pl", AArch64CC::PL)
2963 .Case("vs", AArch64CC::VS)
2964 .Case("vc", AArch64CC::VC)
2965 .Case("hi", AArch64CC::HI)
2966 .Case("ls", AArch64CC::LS)
2967 .Case("ge", AArch64CC::GE)
2968 .Case("lt", AArch64CC::LT)
2969 .Case("gt", AArch64CC::GT)
2970 .Case("le", AArch64CC::LE)
2971 .Case("al", AArch64CC::AL)
2972 .Case("nv", AArch64CC::NV)
2973 .Default(AArch64CC::Invalid);
2974
2975 if (CC == AArch64CC::Invalid &&
2976 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2977 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2978 .Case("none", AArch64CC::EQ)
2979 .Case("any", AArch64CC::NE)
2980 .Case("nlast", AArch64CC::HS)
2981 .Case("last", AArch64CC::LO)
2982 .Case("first", AArch64CC::MI)
2983 .Case("nfrst", AArch64CC::PL)
2984 .Case("pmore", AArch64CC::HI)
2985 .Case("plast", AArch64CC::LS)
2986 .Case("tcont", AArch64CC::GE)
2987 .Case("tstop", AArch64CC::LT)
2988 .Default(AArch64CC::Invalid);
2989
2990 return CC;
2991}
2992
/// parseCondCode - Parse a Condition Code operand.
/// Returns true (after emitting a diagnostic) on failure, false on success.
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  StringRef Cond = Tok.getString();
  AArch64CC::CondCode CC = parseCondCodeString(Cond);
  if (CC == AArch64CC::Invalid)
    return TokError("invalid condition code");
  Parser.Lex(); // Eat identifier token.

  if (invertCondCode) {
    // AL and NV have no meaningful inversion, so they are rejected when the
    // instruction requires the inverted form.
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
      return TokError("condition codes AL and NV are invalid for this instruction");
    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
  }

  Operands.push_back(
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
  return false;
}
3017
3018OperandMatchResultTy
3019AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3020 MCAsmParser &Parser = getParser();
3021 const AsmToken &Tok = Parser.getTok();
3022 SMLoc S = getLoc();
3023
3024 if (Tok.isNot(AsmToken::Identifier)) {
3025 TokError("invalid operand for instruction");
3026 return MatchOperand_ParseFail;
3027 }
3028
3029 unsigned PStateImm = -1;
3030 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3031 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3032 PStateImm = SVCR->Encoding;
3033
3034 Operands.push_back(
3035 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3036 Parser.Lex(); // Eat identifier token.
3037 return MatchOperand_Success;
3038}
3039
/// Parse an SME matrix operand: either the full "za" array, or a tile /
/// row / column register such as "za0.b" or "za3h.s".
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // "za" on its own names the whole accumulator array.
  if (Name.equals_insensitive("za")) {
    Parser.Lex(); // eat "za"
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
    return MatchOperand_Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return MatchOperand_NoMatch;

  // Matrix register names carry an element-width suffix after a '.'.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  // Split e.g. "za3h.s" into Head = "za3h" and Tail = ".s"; the last
  // character of Head ('h' or 'v') selects row vs. column access.
  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes) {
    TokError("Expected the register to be followed by element width suffix");
    return MatchOperand_ParseFail;
  }
  unsigned ElementWidth = KindRes->second;

  Parser.Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    // NOTE(review): returning NoMatch after tokens have already been consumed
    // looks suspicious -- ParseFail may be intended here; confirm.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }
  return MatchOperand_Success;
}
3100
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // Shift (as opposed to extend) operations always require an amount.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // Only a compile-time constant amount is representable in the operand.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
3174
// Table mapping architecture-extension names (as accepted by directives such
// as ".arch_extension") to the subtarget features they enable. An empty
// feature set marks a name that is recognized but not yet supported.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64", {AArch64::FeatureSMEF64}},
    {"sme-i64", {AArch64::FeatureSMEI64}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
3218
3219static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3220 if (FBS[AArch64::HasV8_1aOps])
3221 Str += "ARMv8.1a";
3222 else if (FBS[AArch64::HasV8_2aOps])
3223 Str += "ARMv8.2a";
3224 else if (FBS[AArch64::HasV8_3aOps])
3225 Str += "ARMv8.3a";
3226 else if (FBS[AArch64::HasV8_4aOps])
3227 Str += "ARMv8.4a";
3228 else if (FBS[AArch64::HasV8_5aOps])
3229 Str += "ARMv8.5a";
3230 else if (FBS[AArch64::HasV8_6aOps])
3231 Str += "ARMv8.6a";
3232 else if (FBS[AArch64::HasV8_7aOps])
3233 Str += "ARMv8.7a";
3234 else {
3235 SmallVector<std::string, 2> ExtMatches;
3236 for (const auto& Ext : ExtensionMap) {
3237 // Use & in case multiple features are enabled
3238 if ((FBS & Ext.Features) != FeatureBitset())
3239 ExtMatches.push_back(Ext.Name);
3240 }
3241 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3242 }
3243}
3244
3245void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3246 SMLoc S) {
3247 const uint16_t Op2 = Encoding & 7;
3248 const uint16_t Cm = (Encoding & 0x78) >> 3;
3249 const uint16_t Cn = (Encoding & 0x780) >> 7;
3250 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3251
3252 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3253
3254 Operands.push_back(
3255 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3256 Operands.push_back(
3257 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3258 Operands.push_back(
3259 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3260 Expr = MCConstantExpr::create(Op2, getContext());
3261 Operands.push_back(
3262 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3263}
3264
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Returns true (with a diagnostic emitted) on failure, false on success.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  // The alias is rewritten as a plain "sys" mnemonic; the per-op operands
  // are synthesized below by createSysAlias.
  Mnemonic = Name;
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      // Known name, but the subtarget lacks the required features.
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // For the prediction-restriction aliases the mnemonic itself selects op2.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" act on everything and take no register.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3361
/// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either a
/// #imm in [0,15] or a named barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Saved so the token can be pushed back if the nXS variant should match.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the symbolic name, if the encoding has one (e.g. 15 -> "sy").
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3434
/// Parse the operand of a v8.7-A DSB instruction in its nXS variant: either
/// one of the immediates 16/20/24/28 or a named nXS barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // The range check above guarantees the table lookup succeeds.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3489
3490OperandMatchResultTy
3491AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3492 MCAsmParser &Parser = getParser();
3493 const AsmToken &Tok = Parser.getTok();
3494
3495 if (Tok.isNot(AsmToken::Identifier))
3496 return MatchOperand_NoMatch;
3497
3498 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3499 return MatchOperand_NoMatch;
3500
3501 int MRSReg, MSRReg;
3502 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3503 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3504 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3505 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3506 } else
3507 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3508
3509 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3510 unsigned PStateImm = -1;
3511 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3512 PStateImm = PState->Encoding;
3513
3514 Operands.push_back(
3515 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3516 PStateImm, getContext()));
3517 Parser.Lex(); // Eat identifier
3518
3519 return MatchOperand_Success;
3520}
3521
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Note the inverted convention: returns true on failure, false on success.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Validate the ".8b"/".4s"-style suffix and get its element width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));

  // A trailing "[index]" is optional; only a malformed index is an error.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3553
3554OperandMatchResultTy
3555AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3556 SMLoc SIdx = getLoc();
3557 if (parseOptionalToken(AsmToken::LBrac)) {
3558 const MCExpr *ImmVal;
3559 if (getParser().parseExpression(ImmVal))
3560 return MatchOperand_NoMatch;
3561 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3562 if (!MCE) {
3563 TokError("immediate value expected for vector index");
3564 return MatchOperand_ParseFail;;
3565 }
3566
3567 SMLoc E = getLoc();
3568
3569 if (parseToken(AsmToken::RBrac, "']' expected"))
3570 return MatchOperand_ParseFail;;
3571
3572 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3573 E, getContext()));
3574 return MatchOperand_Success;
3575 }
3576
3577 return MatchOperand_NoMatch;
3578}
3579
3580// tryParseVectorRegister - Try to parse a vector register name with
3581// optional kind specifier. If it is a register specifier, eat the token
3582// and return it.
3583OperandMatchResultTy
3584AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3585 RegKind MatchKind) {
3586 MCAsmParser &Parser = getParser();
3587 const AsmToken &Tok = Parser.getTok();
3588
3589 if (Tok.isNot(AsmToken::Identifier))
3590 return MatchOperand_NoMatch;
3591
3592 StringRef Name = Tok.getString();
3593 // If there is a kind specifier, it's separated from the register name by
3594 // a '.'.
3595 size_t Start = 0, Next = Name.find('.');
3596 StringRef Head = Name.slice(Start, Next);
3597 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3598
3599 if (RegNum) {
3600 if (Next != StringRef::npos) {
3601 Kind = Name.slice(Next, StringRef::npos);
3602 if (!isValidVectorKind(Kind, MatchKind)) {
3603 TokError("invalid vector kind qualifier");
3604 return MatchOperand_ParseFail;
3605 }
3606 }
3607 Parser.Lex(); // Eat the register token.
3608
3609 Reg = RegNum;
3610 return MatchOperand_Success;
3611 }
3612
3613 return MatchOperand_NoMatch;
3614}
3615
3616/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3617OperandMatchResultTy
3618AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3619 // Check for a SVE predicate register specifier first.
3620 const SMLoc S = getLoc();
3621 StringRef Kind;
3622 unsigned RegNum;
3623 auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3624 if (Res != MatchOperand_Success)
3625 return Res;
3626
3627 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3628 if (!KindRes)
3629 return MatchOperand_NoMatch;
3630
3631 unsigned ElementWidth = KindRes->second;
3632 Operands.push_back(AArch64Operand::CreateVectorReg(
3633 RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3634 getLoc(), getContext()));
3635
3636 if (getLexer().is(AsmToken::LBrac)) {
3637 // Indexed predicate, there's no comma so try parse the next operand
3638 // immediately.
3639 if (parseOperand(Operands, false, false))
3640 return MatchOperand_NoMatch;
3641 }
3642
3643 // Not all predicates are followed by a '/m' or '/z'.
3644 MCAsmParser &Parser = getParser();
3645 if (Parser.getTok().isNot(AsmToken::Slash))
3646 return MatchOperand_Success;
3647
3648 // But when they do they shouldn't have an element type suffix.
3649 if (!Kind.empty()) {
3650 Error(S, "not expecting size suffix");
3651 return MatchOperand_ParseFail;
3652 }
3653
3654 // Add a literal slash as operand
3655 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
3656
3657 Parser.Lex(); // Eat the slash.
3658
3659 // Zeroing or merging?
3660 auto Pred = Parser.getTok().getString().lower();
3661 if (Pred != "z" && Pred != "m") {
3662 Error(getLoc(), "expecting 'm' or 'z' predication");
3663 return MatchOperand_ParseFail;
3664 }
3665
3666 // Add zero/merge token.
3667 const char *ZM = Pred == "z" ? "z" : "m";
3668 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
3669
3670 Parser.Lex(); // Eat zero/merge token.
3671 return MatchOperand_Success;
3672}
3673
3674/// parseRegister - Parse a register operand.
3675bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3676 // Try for a Neon vector register.
3677 if (!tryParseNeonVectorRegister(Operands))
3678 return false;
3679
3680 // Otherwise try for a scalar register.
3681 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3682 return false;
3683
3684 return true;
3685}
3686
3687bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3688 MCAsmParser &Parser = getParser();
3689 bool HasELFModifier = false;
3690 AArch64MCExpr::VariantKind RefKind;
3691
3692 if (parseOptionalToken(AsmToken::Colon)) {
3693 HasELFModifier = true;
3694
3695 if (Parser.getTok().isNot(AsmToken::Identifier))
3696 return TokError("expect relocation specifier in operand after ':'");
3697
3698 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3699 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3700 .Case("lo12", AArch64MCExpr::VK_LO12)
3701 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3702 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3703 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3704 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3705 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3706 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3707 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3708 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3709 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3710 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3711 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3712 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3713 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3714 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3715 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3716 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3717 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3718 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3719 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3720 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3721 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3722 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3723 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3724 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3725 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3726 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3727 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3728 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3729 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3730 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3731 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3732 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3733 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3734 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3735 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3736 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3737 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3738 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3739 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3740 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3741 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3742 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3743 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3744 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3745 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3746 .Default(AArch64MCExpr::VK_INVALID);
3747
3748 if (RefKind == AArch64MCExpr::VK_INVALID)
3749 return TokError("expect relocation specifier in operand after ':'");
3750
3751 Parser.Lex(); // Eat identifier
3752
3753 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3754 return true;
3755 }
3756
3757 if (getParser().parseExpression(ImmVal))
3758 return true;
3759
3760 if (HasELFModifier)
3761 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3762
3763 return false;
3764}
3765
3766template <RegKind VectorKind>
3767OperandMatchResultTy
3768AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3769 bool ExpectMatch) {
3770 MCAsmParser &Parser = getParser();
3771 if (!Parser.getTok().is(AsmToken::LCurly))
3772 return MatchOperand_NoMatch;
3773
3774 // Wrapper around parse function
3775 auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3776 bool NoMatchIsError) {
3777 auto RegTok = Parser.getTok();
3778 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3779 if (ParseRes == MatchOperand_Success) {
3780 if (parseVectorKind(Kind, VectorKind))
3781 return ParseRes;
3782 llvm_unreachable("Expected a valid vector kind")::llvm::llvm_unreachable_internal("Expected a valid vector kind"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3782)
;
3783 }
3784
3785 if (RegTok.isNot(AsmToken::Identifier) ||
3786 ParseRes == MatchOperand_ParseFail ||
3787 (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
3788 !RegTok.getString().startswith_insensitive("za"))) {
3789 Error(Loc, "vector register expected");
3790 return MatchOperand_ParseFail;
3791 }
3792
3793 return MatchOperand_NoMatch;
3794 };
3795
3796 SMLoc S = getLoc();
3797 auto LCurly = Parser.getTok();
3798 Parser.Lex(); // Eat left bracket token.
3799
3800 StringRef Kind;
3801 unsigned FirstReg;
3802 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3803
3804 // Put back the original left bracket if there was no match, so that
3805 // different types of list-operands can be matched (e.g. SVE, Neon).
3806 if (ParseRes == MatchOperand_NoMatch)
3807 Parser.getLexer().UnLex(LCurly);
3808
3809 if (ParseRes != MatchOperand_Success)
3810 return ParseRes;
3811
3812 int64_t PrevReg = FirstReg;
3813 unsigned Count = 1;
3814
3815 if (parseOptionalToken(AsmToken::Minus)) {
3816 SMLoc Loc = getLoc();
3817 StringRef NextKind;
3818
3819 unsigned Reg;
3820 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3821 if (ParseRes != MatchOperand_Success)
3822 return ParseRes;
3823
3824 // Any Kind suffices must match on all regs in the list.
3825 if (Kind != NextKind) {
3826 Error(Loc, "mismatched register size suffix");
3827 return MatchOperand_ParseFail;
3828 }
3829
3830 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3831
3832 if (Space == 0 || Space > 3) {
3833 Error(Loc, "invalid number of vectors");
3834 return MatchOperand_ParseFail;
3835 }
3836
3837 Count += Space;
3838 }
3839 else {
3840 while (parseOptionalToken(AsmToken::Comma)) {
3841 SMLoc Loc = getLoc();
3842 StringRef NextKind;
3843 unsigned Reg;
3844 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3845 if (ParseRes != MatchOperand_Success)
3846 return ParseRes;
3847
3848 // Any Kind suffices must match on all regs in the list.
3849 if (Kind != NextKind) {
3850 Error(Loc, "mismatched register size suffix");
3851 return MatchOperand_ParseFail;
3852 }
3853
3854 // Registers must be incremental (with wraparound at 31)
3855 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3856 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3857 Error(Loc, "registers must be sequential");
3858 return MatchOperand_ParseFail;
3859 }
3860
3861 PrevReg = Reg;
3862 ++Count;
3863 }
3864 }
3865
3866 if (parseToken(AsmToken::RCurly, "'}' expected"))
3867 return MatchOperand_ParseFail;
3868
3869 if (Count > 4) {
3870 Error(S, "invalid number of vectors");
3871 return MatchOperand_ParseFail;
3872 }
3873
3874 unsigned NumElements = 0;
3875 unsigned ElementWidth = 0;
3876 if (!Kind.empty()) {
3877 if (const auto &VK = parseVectorKind(Kind, VectorKind))
3878 std::tie(NumElements, ElementWidth) = *VK;
3879 }
3880
3881 Operands.push_back(AArch64Operand::CreateVectorList(
3882 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3883 getContext()));
3884
3885 return MatchOperand_Success;
3886}
3887
3888/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3889bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3890 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3891 if (ParseRes != MatchOperand_Success)
3892 return true;
3893
3894 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3895}
3896
3897OperandMatchResultTy
3898AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3899 SMLoc StartLoc = getLoc();
3900
3901 unsigned RegNum;
3902 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3903 if (Res != MatchOperand_Success)
3904 return Res;
3905
3906 if (!parseOptionalToken(AsmToken::Comma)) {
3907 Operands.push_back(AArch64Operand::CreateReg(
3908 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3909 return MatchOperand_Success;
3910 }
3911
3912 parseOptionalToken(AsmToken::Hash);
3913
3914 if (getParser().getTok().isNot(AsmToken::Integer)) {
3915 Error(getLoc(), "index must be absent or #0");
3916 return MatchOperand_ParseFail;
3917 }
3918
3919 const MCExpr *ImmVal;
3920 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3921 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3922 Error(getLoc(), "index must be absent or #0");
3923 return MatchOperand_ParseFail;
3924 }
3925
3926 Operands.push_back(AArch64Operand::CreateReg(
3927 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3928 return MatchOperand_Success;
3929}
3930
3931template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3932OperandMatchResultTy
3933AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3934 SMLoc StartLoc = getLoc();
3935
3936 unsigned RegNum;
3937 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3938 if (Res != MatchOperand_Success)
3939 return Res;
3940
3941 // No shift/extend is the default.
3942 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3943 Operands.push_back(AArch64Operand::CreateReg(
3944 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3945 return MatchOperand_Success;
3946 }
3947
3948 // Eat the comma
3949 getParser().Lex();
3950
3951 // Match the shift
3952 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3953 Res = tryParseOptionalShiftExtend(ExtOpnd);
3954 if (Res != MatchOperand_Success)
3955 return Res;
3956
3957 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3958 Operands.push_back(AArch64Operand::CreateReg(
3959 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3960 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3961 Ext->hasShiftExtendAmount()));
3962
3963 return MatchOperand_Success;
3964}
3965
3966bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3967 MCAsmParser &Parser = getParser();
3968
3969 // Some SVE instructions have a decoration after the immediate, i.e.
3970 // "mul vl". We parse them here and add tokens, which must be present in the
3971 // asm string in the tablegen instruction.
3972 bool NextIsVL =
3973 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
3974 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3975 if (!Parser.getTok().getString().equals_insensitive("mul") ||
3976 !(NextIsVL || NextIsHash))
3977 return true;
3978
3979 Operands.push_back(
3980 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
3981 Parser.Lex(); // Eat the "mul"
3982
3983 if (NextIsVL) {
3984 Operands.push_back(
3985 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
3986 Parser.Lex(); // Eat the "vl"
3987 return false;
3988 }
3989
3990 if (NextIsHash) {
3991 Parser.Lex(); // Eat the #
3992 SMLoc S = getLoc();
3993
3994 // Parse immediate operand.
3995 const MCExpr *ImmVal;
3996 if (!Parser.parseExpression(ImmVal))
3997 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3998 Operands.push_back(AArch64Operand::CreateImm(
3999 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4000 getContext()));
4001 return MatchOperand_Success;
4002 }
4003 }
4004
4005 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4006}
4007
4008bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4009 MCAsmParser &Parser = getParser();
4010 auto Tok = Parser.getTok();
4011 if (Tok.isNot(AsmToken::Identifier))
4012 return true;
4013
4014 auto Keyword = Tok.getString();
4015 Keyword = StringSwitch<StringRef>(Keyword.lower())
4016 .Case("sm", "sm")
4017 .Case("za", "za")
4018 .Default(Keyword);
4019 Operands.push_back(
4020 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4021
4022 Parser.Lex();
4023 return false;
4024}
4025
4026/// parseOperand - Parse a arm instruction operand. For now this parses the
4027/// operand regardless of the mnemonic.
4028bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4029 bool invertCondCode) {
4030 MCAsmParser &Parser = getParser();
4031
4032 OperandMatchResultTy ResTy =
4033 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
4034
4035 // Check if the current operand has a custom associated parser, if so, try to
4036 // custom parse the operand, or fallback to the general approach.
4037 if (ResTy == MatchOperand_Success)
4038 return false;
4039 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4040 // there was a match, but an error occurred, in which case, just return that
4041 // the operand parsing failed.
4042 if (ResTy == MatchOperand_ParseFail)
4043 return true;
4044
4045 // Nothing custom, so do general case parsing.
4046 SMLoc S, E;
4047 switch (getLexer().getKind()) {
4048 default: {
4049 SMLoc S = getLoc();
4050 const MCExpr *Expr;
4051 if (parseSymbolicImmVal(Expr))
4052 return Error(S, "invalid operand");
4053
4054 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4055 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4056 return false;
4057 }
4058 case AsmToken::LBrac: {
4059 Operands.push_back(
4060 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4061 Parser.Lex(); // Eat '['
4062
4063 // There's no comma after a '[', so we can parse the next operand
4064 // immediately.
4065 return parseOperand(Operands, false, false);
4066 }
4067 case AsmToken::LCurly: {
4068 if (!parseNeonVectorList(Operands))
4069 return false;
4070
4071 Operands.push_back(
4072 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4073 Parser.Lex(); // Eat '{'
4074
4075 // There's no comma after a '{', so we can parse the next operand
4076 // immediately.
4077 return parseOperand(Operands, false, false);
4078 }
4079 case AsmToken::Identifier: {
4080 // If we're expecting a Condition Code operand, then just parse that.
4081 if (isCondCode)
4082 return parseCondCode(Operands, invertCondCode);
4083
4084 // If it's a register name, parse it.
4085 if (!parseRegister(Operands))
4086 return false;
4087
4088 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4089 // by SVE instructions.
4090 if (!parseOptionalMulOperand(Operands))
4091 return false;
4092
4093 // If this is an "smstart" or "smstop" instruction, parse its special
4094 // keyword operand as an identifier.
4095 if (Mnemonic == "smstart" || Mnemonic == "smstop")
4096 return parseKeywordOperand(Operands);
4097
4098 // This could be an optional "shift" or "extend" operand.
4099 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
4100 // We can only continue if no tokens were eaten.
4101 if (GotShift != MatchOperand_NoMatch)
4102 return GotShift;
4103
4104 // If this is a two-word mnemonic, parse its special keyword
4105 // operand as an identifier.
4106 if (Mnemonic == "brb")
4107 return parseKeywordOperand(Operands);
4108
4109 // This was not a register so parse other operands that start with an
4110 // identifier (like labels) as expressions and create them as immediates.
4111 const MCExpr *IdVal;
4112 S = getLoc();
4113 if (getParser().parseExpression(IdVal))
4114 return true;
4115 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4116 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4117 return false;
4118 }
4119 case AsmToken::Integer:
4120 case AsmToken::Real:
4121 case AsmToken::Hash: {
4122 // #42 -> immediate.
4123 S = getLoc();
4124
4125 parseOptionalToken(AsmToken::Hash);
4126
4127 // Parse a negative sign
4128 bool isNegative = false;
4129 if (Parser.getTok().is(AsmToken::Minus)) {
4130 isNegative = true;
4131 // We need to consume this token only when we have a Real, otherwise
4132 // we let parseSymbolicImmVal take care of it
4133 if (Parser.getLexer().peekTok().is(AsmToken::Real))
4134 Parser.Lex();
4135 }
4136
4137 // The only Real that should come through here is a literal #0.0 for
4138 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4139 // so convert the value.
4140 const AsmToken &Tok = Parser.getTok();
4141 if (Tok.is(AsmToken::Real)) {
4142 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4143 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4144 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4145 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4146 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4147 return TokError("unexpected floating point literal");
4148 else if (IntVal != 0 || isNegative)
4149 return TokError("expected floating-point constant #0.0");
4150 Parser.Lex(); // Eat the token.
4151
4152 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4153 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4154 return false;
4155 }
4156
4157 const MCExpr *ImmVal;
4158 if (parseSymbolicImmVal(ImmVal))
4159 return true;
4160
4161 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4162 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4163 return false;
4164 }
4165 case AsmToken::Equal: {
4166 SMLoc Loc = getLoc();
4167 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4168 return TokError("unexpected token in operand");
4169 Parser.Lex(); // Eat '='
4170 const MCExpr *SubExprVal;
4171 if (getParser().parseExpression(SubExprVal))
4172 return true;
4173
4174 if (Operands.size() < 2 ||
4175 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4176 return Error(Loc, "Only valid when first operand is register");
4177
4178 bool IsXReg =
4179 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4180 Operands[1]->getReg());
4181
4182 MCContext& Ctx = getContext();
4183 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4184 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
4185 if (isa<MCConstantExpr>(SubExprVal)) {
4186 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4187 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4188 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
4189 ShiftAmt += 16;
4190 Imm >>= 16;
4191 }
4192 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4193 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4194 Operands.push_back(AArch64Operand::CreateImm(
4195 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4196 if (ShiftAmt)
4197 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4198 ShiftAmt, true, S, E, Ctx));
4199 return false;
4200 }
4201 APInt Simm = APInt(64, Imm << ShiftAmt);
4202 // check if the immediate is an unsigned or signed 32-bit int for W regs
4203 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4204 return Error(Loc, "Immediate too large for register");
4205 }
4206 // If it is a label or an imm that cannot fit in a movz, put it into CP.
4207 const MCExpr *CPLoc =
4208 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4209 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4210 return false;
4211 }
4212 }
4213}
4214
4215bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4216 const MCExpr *Expr = nullptr;
4217 SMLoc L = getLoc();
4218 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4219 return true;
4220 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4221 if (check(!Value, L, "expected constant expression"))
4222 return true;
4223 Out = Value->getValue();
4224 return false;
4225}
4226
4227bool AArch64AsmParser::parseComma() {
4228 if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
4229 "expected comma"))
4230 return true;
4231 // Eat the comma
4232 getParser().Lex();
4233 return false;
4234}
4235
4236bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4237 unsigned First, unsigned Last) {
4238 unsigned Reg;
25
'Reg' declared without an initial value
4239 SMLoc Start, End;
4240 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
26
Calling 'AArch64AsmParser::ParseRegister'
35
Returning from 'AArch64AsmParser::ParseRegister'
36
Assuming the condition is false
37
Taking false branch
4241 return true;
4242
4243 // Special handling for FP and LR; they aren't linearly after x28 in
4244 // the registers enum.
4245 unsigned RangeEnd = Last;
4246 if (Base
37.1
'Base' is equal to X0
== AArch64::X0) {
38
Taking true branch
4247 if (Last
38.1
'Last' is equal to FP
== AArch64::FP) {
39
Taking true branch
4248 RangeEnd = AArch64::X28;
4249 if (Reg == AArch64::FP) {
40
The left operand of '==' is a garbage value
4250 Out = 29;
4251 return false;
4252 }
4253 }
4254 if (Last == AArch64::LR) {
4255 RangeEnd = AArch64::X28;
4256 if (Reg == AArch64::FP) {
4257 Out = 29;
4258 return false;
4259 } else if (Reg == AArch64::LR) {
4260 Out = 30;
4261 return false;
4262 }
4263 }
4264 }
4265
4266 if (check(Reg < First || Reg > RangeEnd, Start,
4267 Twine("expected register in range ") +
4268 AArch64InstPrinter::getRegisterName(First) + " to " +
4269 AArch64InstPrinter::getRegisterName(Last)))
4270 return true;
4271 Out = Reg - Base;
4272 return false;
4273}
4274
4275bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4276 const MCParsedAsmOperand &Op2) const {
4277 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4278 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4279 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4280 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4281 return MCTargetAsmParser::regsEqual(Op1, Op2);
4282
4283 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4284, __extension__ __PRETTY_FUNCTION__))
4284 "Testing equality of non-scalar registers not supported")(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4284, __extension__ __PRETTY_FUNCTION__))
;
4285
4286 // Check if a registers match their sub/super register classes.
4287 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4288 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4289 if (AOp1.getRegEqualityTy() == EqualsSubReg)
4290 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4291 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4292 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4293 if (AOp2.getRegEqualityTy() == EqualsSubReg)
4294 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4295
4296 return false;
4297}
4298
4299/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
4300/// operands.
4301bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
4302 StringRef Name, SMLoc NameLoc,
4303 OperandVector &Operands) {
4304 MCAsmParser &Parser = getParser();
4305 Name = StringSwitch<StringRef>(Name.lower())
4306 .Case("beq", "b.eq")
4307 .Case("bne", "b.ne")
4308 .Case("bhs", "b.hs")
4309 .Case("bcs", "b.cs")
4310 .Case("blo", "b.lo")
4311 .Case("bcc", "b.cc")
4312 .Case("bmi", "b.mi")
4313 .Case("bpl", "b.pl")
4314 .Case("bvs", "b.vs")
4315 .Case("bvc", "b.vc")
4316 .Case("bhi", "b.hi")
4317 .Case("bls", "b.ls")
4318 .Case("bge", "b.ge")
4319 .Case("blt", "b.lt")
4320 .Case("bgt", "b.gt")
4321 .Case("ble", "b.le")
4322 .Case("bal", "b.al")
4323 .Case("bnv", "b.nv")
4324 .Default(Name);
4325
4326 // First check for the AArch64-specific .req directive.
4327 if (Parser.getTok().is(AsmToken::Identifier) &&
4328 Parser.getTok().getIdentifier().lower() == ".req") {
4329 parseDirectiveReq(Name, NameLoc);
4330 // We always return 'error' for this, as we're done with this
4331 // statement and don't need to match the 'instruction."
4332 return true;
4333 }
4334
4335 // Create the leading tokens for the mnemonic, split by '.' characters.
4336 size_t Start = 0, Next = Name.find('.');
4337 StringRef Head = Name.slice(Start, Next);
4338
4339 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4340 // the SYS instruction.
4341 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4342 Head == "cfp" || Head == "dvp" || Head == "cpp")
4343 return parseSysAlias(Head, NameLoc, Operands);
4344
4345 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
4346 Mnemonic = Head;
4347
4348 // Handle condition codes for a branch mnemonic
4349 if (Head == "b" && Next != StringRef::npos) {
4350 Start = Next;
4351 Next = Name.find('.', Start + 1);
4352 Head = Name.slice(Start + 1, Next);
4353
4354 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4355 (Head.data() - Name.data()));
4356 AArch64CC::CondCode CC = parseCondCodeString(Head);
4357 if (CC == AArch64CC::Invalid)
4358 return Error(SuffixLoc, "invalid condition code");
4359 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
4360 /*IsSuffix=*/true));
4361 Operands.push_back(
4362 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4363 }
4364
4365 // Add the remaining tokens in the mnemonic.
4366 while (Next != StringRef::npos) {
4367 Start = Next;
4368 Next = Name.find('.', Start + 1);
4369 Head = Name.slice(Start, Next);
4370 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4371 (Head.data() - Name.data()) + 1);
4372 Operands.push_back(AArch64Operand::CreateToken(
4373 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
4374 }
4375
4376 // Conditional compare instructions have a Condition Code operand, which needs
4377 // to be parsed and an immediate operand created.
4378 bool condCodeFourthOperand =
4379 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4380 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4381 Head == "csinc" || Head == "csinv" || Head == "csneg");
4382
4383 // These instructions are aliases to some of the conditional select
4384 // instructions. However, the condition code is inverted in the aliased
4385 // instruction.
4386 //
4387 // FIXME: Is this the correct way to handle these? Or should the parser
4388 // generate the aliased instructions directly?
4389 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4390 bool condCodeThirdOperand =
4391 (Head == "cinc" || Head == "cinv" || Head == "cneg");
4392
4393 // Read the remaining operands.
4394 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4395
4396 unsigned N = 1;
4397 do {
4398 // Parse and remember the operand.
4399 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4400 (N == 3 && condCodeThirdOperand) ||
4401 (N == 2 && condCodeSecondOperand),
4402 condCodeSecondOperand || condCodeThirdOperand)) {
4403 return true;
4404 }
4405
4406 // After successfully parsing some operands there are three special cases
4407 // to consider (i.e. notional operands not separated by commas). Two are
4408 // due to memory specifiers:
4409 // + An RBrac will end an address for load/store/prefetch
4410 // + An '!' will indicate a pre-indexed operation.
4411 //
4412 // And a further case is '}', which ends a group of tokens specifying the
4413 // SME accumulator array 'ZA' or tile vector, i.e.
4414 //
4415 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
4416 //
4417 // It's someone else's responsibility to make sure these tokens are sane
4418 // in the given context!
4419
4420 if (parseOptionalToken(AsmToken::RBrac))
4421 Operands.push_back(
4422 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4423 if (parseOptionalToken(AsmToken::Exclaim))
4424 Operands.push_back(
4425 AArch64Operand::CreateToken("!", getLoc(), getContext()));
4426 if (parseOptionalToken(AsmToken::RCurly))
4427 Operands.push_back(
4428 AArch64Operand::CreateToken("}", getLoc(), getContext()));
4429
4430 ++N;
4431 } while (parseOptionalToken(AsmToken::Comma));
4432 }
4433
4434 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4435 return true;
4436
4437 return false;
4438}
4439
4440static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4441 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(static_cast <bool> ((ZReg >= AArch64::Z0) &&
(ZReg <= AArch64::Z31)) ? void (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4441, __extension__ __PRETTY_FUNCTION__))
;
4442 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4443 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4444 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4445 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4446 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4447 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4448}
4449
4450// FIXME: This entire function is a giant hack to provide us with decent
4451// operand range validation/diagnostics until TableGen/MC can be extended
4452// to support autogeneration of this kind of validation.
// Semantic validation of an already-parsed MCInst. Checks three families of
// rules, each of which produces a diagnostic and returns true on failure:
//  1. movprfx sequencing constraints (this instruction may follow a prefix),
//  2. unpredictable register combinations for load/store/exclusive forms,
//  3. symbolic-immediate restrictions on the ADD/SUB immediate forms.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erroneously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed instructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                          " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                           " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    // Tied operands (TIED_TO != -1) are the destructive use and are exempt.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                             " movprfx and destination also used as non-destructive"
                             " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instruction's general predicate (first PPR operand).
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback forms: operand 0 is the written-back base (def),
    // operands 1/2 are the two destinations, operand 3 the base use.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // NOTE(review): after this fallthrough the next case reads operands 0/1,
    // which for these writeback forms are Rn(def)/Rt rather than Rt/Rt2 —
    // verify the Rt2==Rt check below is intended to apply to these opcodes.
    LLVM_FALLTHROUGH;
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive stores: status register must not alias the data register,
    // nor the base register (unless the base is SP).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
                   "unpredictable LDRA instruction, writeback base"
                   " is also a destination");
    break;
  }
  }


  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
4737
4738static std::string AArch64MnemonicSpellCheck(StringRef S,
4739 const FeatureBitset &FBS,
4740 unsigned VariantID = 0);
4741
// Translate a match-failure code from the TableGen-generated matcher into a
// human-readable diagnostic at Loc. ErrorInfo is only consulted for
// Match_InvalidTiedOperand, where it indexes Operands to recover which tied
// register constraint was violated. Always returns true (an error was
// emitted).
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // Pick the message according to how the operand was required to relate
    // to the destination register (same reg, its 32-bit or 64-bit form).
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_3:
    return Error(Loc, "immediate must be an integer in range [0, 3].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
  case Match_InvalidSVCR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Unknown mnemonic: offer a spelling suggestion based on the mnemonics
    // available under the current feature set.
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64shifted128:
    return Error(
        Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted128:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPredicate3bBReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
  case Match_InvalidSVEPredicate3bHReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
  case Match_InvalidSVEPredicate3bSReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  case Match_InvalidMatrixTileVectorH8:
  case Match_InvalidMatrixTileVectorV8:
    return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
  case Match_InvalidMatrixTileVectorH16:
  case Match_InvalidMatrixTileVectorV16:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
  case Match_InvalidMatrixTileVectorH32:
  case Match_InvalidMatrixTileVectorV32:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
  case Match_InvalidMatrixTileVectorH64:
  case Match_InvalidMatrixTileVectorV64:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
  case Match_InvalidMatrixTileVectorH128:
  case Match_InvalidMatrixTileVectorV128:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
  case Match_InvalidMatrixTile32:
    return Error(Loc, "invalid matrix operand, expected za[0-3].s");
  case Match_InvalidMatrixTile64:
    return Error(Loc, "invalid matrix operand, expected za[0-7].d");
  case Match_InvalidMatrix:
    return Error(Loc, "invalid matrix operand, expected za");
  case Match_InvalidMatrixIndexGPR32_12_15:
    return Error(Loc, "operand must be a register in range [w12, w15]");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
5098
// Forward declaration. Maps a single subtarget-feature bit index (from the
// MissingFeatures set produced by MatchInstructionImpl) to a human-readable
// feature name; used below when building the "instruction requires:"
// diagnostic. Presumably defined by the TableGen'd matcher included later in
// this file — confirm against the generated AArch64GenAsmMatcher.inc.
static const char *getSubtargetFeatureName(uint64_t Val);
5100
5101bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
5102 OperandVector &Operands,
5103 MCStreamer &Out,
5104 uint64_t &ErrorInfo,
5105 bool MatchingInlineAsm) {
5106 assert(!Operands.empty() && "Unexpect empty operand list!")(static_cast <bool> (!Operands.empty() && "Unexpect empty operand list!"
) ? void (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5106, __extension__ __PRETTY_FUNCTION__))
;
5107 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
5108 assert(Op.isToken() && "Leading operand should always be a mnemonic!")(static_cast <bool> (Op.isToken() && "Leading operand should always be a mnemonic!"
) ? void (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5108, __extension__ __PRETTY_FUNCTION__))
;
5109
5110 StringRef Tok = Op.getToken();
5111 unsigned NumOperands = Operands.size();
5112
5113 if (NumOperands == 4 && Tok == "lsl") {
5114 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5115 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5116 if (Op2.isScalarReg() && Op3.isImm()) {
5117 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5118 if (Op3CE) {
5119 uint64_t Op3Val = Op3CE->getValue();
5120 uint64_t NewOp3Val = 0;
5121 uint64_t NewOp4Val = 0;
5122 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
5123 Op2.getReg())) {
5124 NewOp3Val = (32 - Op3Val) & 0x1f;
5125 NewOp4Val = 31 - Op3Val;
5126 } else {
5127 NewOp3Val = (64 - Op3Val) & 0x3f;
5128 NewOp4Val = 63 - Op3Val;
5129 }
5130
5131 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
5132 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
5133
5134 Operands[0] =
5135 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
5136 Operands.push_back(AArch64Operand::CreateImm(
5137 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
5138 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
5139 Op3.getEndLoc(), getContext());
5140 }
5141 }
5142 } else if (NumOperands == 4 && Tok == "bfc") {
5143 // FIXME: Horrible hack to handle BFC->BFM alias.
5144 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5145 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
5146 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
5147
5148 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
5149 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
5150 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
5151
5152 if (LSBCE && WidthCE) {
5153 uint64_t LSB = LSBCE->getValue();
5154 uint64_t Width = WidthCE->getValue();
5155
5156 uint64_t RegWidth = 0;
5157 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5158 Op1.getReg()))
5159 RegWidth = 64;
5160 else
5161 RegWidth = 32;
5162
5163 if (LSB >= RegWidth)
5164 return Error(LSBOp.getStartLoc(),
5165 "expected integer in range [0, 31]");
5166 if (Width < 1 || Width > RegWidth)
5167 return Error(WidthOp.getStartLoc(),
5168 "expected integer in range [1, 32]");
5169
5170 uint64_t ImmR = 0;
5171 if (RegWidth == 32)
5172 ImmR = (32 - LSB) & 0x1f;
5173 else
5174 ImmR = (64 - LSB) & 0x3f;
5175
5176 uint64_t ImmS = Width - 1;
5177
5178 if (ImmR != 0 && ImmS >= ImmR)
5179 return Error(WidthOp.getStartLoc(),
5180 "requested insert overflows register");
5181
5182 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
5183 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
5184 Operands[0] =
5185 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
5186 Operands[2] = AArch64Operand::CreateReg(
5187 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
5188 SMLoc(), SMLoc(), getContext());
5189 Operands[3] = AArch64Operand::CreateImm(
5190 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
5191 Operands.emplace_back(
5192 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
5193 WidthOp.getEndLoc(), getContext()));
5194 }
5195 }
5196 } else if (NumOperands == 5) {
5197 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
5198 // UBFIZ -> UBFM aliases.
5199 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
5200 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5201 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5202 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5203
5204 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5205 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5206 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5207
5208 if (Op3CE && Op4CE) {
5209 uint64_t Op3Val = Op3CE->getValue();
5210 uint64_t Op4Val = Op4CE->getValue();
5211
5212 uint64_t RegWidth = 0;
5213 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5214 Op1.getReg()))
5215 RegWidth = 64;
5216 else
5217 RegWidth = 32;
5218
5219 if (Op3Val >= RegWidth)
5220 return Error(Op3.getStartLoc(),
5221 "expected integer in range [0, 31]");
5222 if (Op4Val < 1 || Op4Val > RegWidth)
5223 return Error(Op4.getStartLoc(),
5224 "expected integer in range [1, 32]");
5225
5226 uint64_t NewOp3Val = 0;
5227 if (RegWidth == 32)
5228 NewOp3Val = (32 - Op3Val) & 0x1f;
5229 else
5230 NewOp3Val = (64 - Op3Val) & 0x3f;
5231
5232 uint64_t NewOp4Val = Op4Val - 1;
5233
5234 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
5235 return Error(Op4.getStartLoc(),
5236 "requested insert overflows register");
5237
5238 const MCExpr *NewOp3 =
5239 MCConstantExpr::create(NewOp3Val, getContext());
5240 const MCExpr *NewOp4 =
5241 MCConstantExpr::create(NewOp4Val, getContext());
5242 Operands[3] = AArch64Operand::CreateImm(
5243 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
5244 Operands[4] = AArch64Operand::CreateImm(
5245 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5246 if (Tok == "bfi")
5247 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5248 getContext());
5249 else if (Tok == "sbfiz")
5250 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5251 getContext());
5252 else if (Tok == "ubfiz")
5253 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5254 getContext());
5255 else
5256 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5256)
;
5257 }
5258 }
5259
5260 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
5261 // UBFX -> UBFM aliases.
5262 } else if (NumOperands == 5 &&
5263 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
5264 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5265 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5266 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5267
5268 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5269 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5270 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5271
5272 if (Op3CE && Op4CE) {
5273 uint64_t Op3Val = Op3CE->getValue();
5274 uint64_t Op4Val = Op4CE->getValue();
5275
5276 uint64_t RegWidth = 0;
5277 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5278 Op1.getReg()))
5279 RegWidth = 64;
5280 else
5281 RegWidth = 32;
5282
5283 if (Op3Val >= RegWidth)
5284 return Error(Op3.getStartLoc(),
5285 "expected integer in range [0, 31]");
5286 if (Op4Val < 1 || Op4Val > RegWidth)
5287 return Error(Op4.getStartLoc(),
5288 "expected integer in range [1, 32]");
5289
5290 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
5291
5292 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
5293 return Error(Op4.getStartLoc(),
5294 "requested extract overflows register");
5295
5296 const MCExpr *NewOp4 =
5297 MCConstantExpr::create(NewOp4Val, getContext());
5298 Operands[4] = AArch64Operand::CreateImm(
5299 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5300 if (Tok == "bfxil")
5301 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5302 getContext());
5303 else if (Tok == "sbfx")
5304 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5305 getContext());
5306 else if (Tok == "ubfx")
5307 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5308 getContext());
5309 else
5310 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5310)
;
5311 }
5312 }
5313 }
5314 }
5315
5316 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
5317 // instruction for FP registers correctly in some rare circumstances. Convert
5318 // it to a safe instruction and warn (because silently changing someone's
5319 // assembly is rude).
5320 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
5321 NumOperands == 4 && Tok == "movi") {
5322 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5323 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5324 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5325 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
5326 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
5327 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
5328 if (Suffix.lower() == ".2d" &&
5329 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
5330 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
5331 " correctly on this CPU, converting to equivalent movi.16b");
5332 // Switch the suffix to .16b.
5333 unsigned Idx = Op1.isToken() ? 1 : 2;
5334 Operands[Idx] =
5335 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
5336 }
5337 }
5338 }
5339
5340 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
5341 // InstAlias can't quite handle this since the reg classes aren't
5342 // subclasses.
5343 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
5344 // The source register can be Wn here, but the matcher expects a
5345 // GPR64. Twiddle it here if necessary.
5346 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5347 if (Op.isScalarReg()) {
5348 unsigned Reg = getXRegFromWReg(Op.getReg());
5349 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5350 Op.getStartLoc(), Op.getEndLoc(),
5351 getContext());
5352 }
5353 }
5354 // FIXME: Likewise for sxt[bh] with a Xd dst operand
5355 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5356 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5357 if (Op.isScalarReg() &&
5358 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5359 Op.getReg())) {
5360 // The source register can be Wn here, but the matcher expects a
5361 // GPR64. Twiddle it here if necessary.
5362 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5363 if (Op.isScalarReg()) {
5364 unsigned Reg = getXRegFromWReg(Op.getReg());
5365 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5366 Op.getStartLoc(),
5367 Op.getEndLoc(), getContext());
5368 }
5369 }
5370 }
5371 // FIXME: Likewise for uxt[bh] with a Xd dst operand
5372 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5373 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5374 if (Op.isScalarReg() &&
5375 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5376 Op.getReg())) {
5377 // The source register can be Wn here, but the matcher expects a
5378 // GPR32. Twiddle it here if necessary.
5379 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5380 if (Op.isScalarReg()) {
5381 unsigned Reg = getWRegFromXReg(Op.getReg());
5382 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5383 Op.getStartLoc(),
5384 Op.getEndLoc(), getContext());
5385 }
5386 }
5387 }
5388
5389 MCInst Inst;
5390 FeatureBitset MissingFeatures;
5391 // First try to match against the secondary set of tables containing the
5392 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5393 unsigned MatchResult =
5394 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5395 MatchingInlineAsm, 1);
5396
5397 // If that fails, try against the alternate table containing long-form NEON:
5398 // "fadd v0.2s, v1.2s, v2.2s"
5399 if (MatchResult != Match_Success) {
5400 // But first, save the short-form match result: we can use it in case the
5401 // long-form match also fails.
5402 auto ShortFormNEONErrorInfo = ErrorInfo;
5403 auto ShortFormNEONMatchResult = MatchResult;
5404 auto ShortFormNEONMissingFeatures = MissingFeatures;
5405
5406 MatchResult =
5407 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5408 MatchingInlineAsm, 0);
5409
5410 // Now, both matches failed, and the long-form match failed on the mnemonic
5411 // suffix token operand. The short-form match failure is probably more
5412 // relevant: use it instead.
5413 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5414 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5415 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5416 MatchResult = ShortFormNEONMatchResult;
5417 ErrorInfo = ShortFormNEONErrorInfo;
5418 MissingFeatures = ShortFormNEONMissingFeatures;
5419 }
5420 }
5421
5422 switch (MatchResult) {
5423 case Match_Success: {
5424 // Perform range checking and other semantic validations
5425 SmallVector<SMLoc, 8> OperandLocs;
5426 NumOperands = Operands.size();
5427 for (unsigned i = 1; i < NumOperands; ++i)
5428 OperandLocs.push_back(Operands[i]->getStartLoc());
5429 if (validateInstruction(Inst, IDLoc, OperandLocs))
5430 return true;
5431
5432 Inst.setLoc(IDLoc);
5433 Out.emitInstruction(Inst, getSTI());
5434 return false;
5435 }
5436 case Match_MissingFeature: {
5437 assert(MissingFeatures.any() && "Unknown missing feature!")(static_cast <bool> (MissingFeatures.any() && "Unknown missing feature!"
) ? void (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5437, __extension__ __PRETTY_FUNCTION__))
;
5438 // Special case the error message for the very common case where only
5439 // a single subtarget feature is missing (neon, e.g.).
5440 std::string Msg = "instruction requires:";
5441 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5442 if (MissingFeatures[i]) {
5443 Msg += " ";
5444 Msg += getSubtargetFeatureName(i);
5445 }
5446 }
5447 return Error(IDLoc, Msg);
5448 }
5449 case Match_MnemonicFail:
5450 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5451 case Match_InvalidOperand: {
5452 SMLoc ErrorLoc = IDLoc;
5453
5454 if (ErrorInfo != ~0ULL) {
5455 if (ErrorInfo >= Operands.size())
5456 return Error(IDLoc, "too few operands for instruction",
5457 SMRange(IDLoc, getTok().getLoc()));
5458
5459 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5460 if (ErrorLoc == SMLoc())
5461 ErrorLoc = IDLoc;
5462 }
5463 // If the match failed on a suffix token operand, tweak the diagnostic
5464 // accordingly.
5465 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5466 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5467 MatchResult = Match_InvalidSuffix;
5468
5469 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5470 }
5471 case Match_InvalidTiedOperand:
5472 case Match_InvalidMemoryIndexed1:
5473 case Match_InvalidMemoryIndexed2:
5474 case Match_InvalidMemoryIndexed4:
5475 case Match_InvalidMemoryIndexed8:
5476 case Match_InvalidMemoryIndexed16:
5477 case Match_InvalidCondCode:
5478 case Match_AddSubRegExtendSmall:
5479 case Match_AddSubRegExtendLarge:
5480 case Match_AddSubSecondSource:
5481 case Match_LogicalSecondSource:
5482 case Match_AddSubRegShift32:
5483 case Match_AddSubRegShift64:
5484 case Match_InvalidMovImm32Shift:
5485 case Match_InvalidMovImm64Shift:
5486 case Match_InvalidFPImm:
5487 case Match_InvalidMemoryWExtend8:
5488 case Match_InvalidMemoryWExtend16:
5489 case Match_InvalidMemoryWExtend32:
5490 case Match_InvalidMemoryWExtend64:
5491 case Match_InvalidMemoryWExtend128:
5492 case Match_InvalidMemoryXExtend8:
5493 case Match_InvalidMemoryXExtend16:
5494 case Match_InvalidMemoryXExtend32:
5495 case Match_InvalidMemoryXExtend64:
5496 case Match_InvalidMemoryXExtend128:
5497 case Match_InvalidMemoryIndexed1SImm4:
5498 case Match_InvalidMemoryIndexed2SImm4:
5499 case Match_InvalidMemoryIndexed3SImm4:
5500 case Match_InvalidMemoryIndexed4SImm4:
5501 case Match_InvalidMemoryIndexed1SImm6:
5502 case Match_InvalidMemoryIndexed16SImm4:
5503 case Match_InvalidMemoryIndexed32SImm4:
5504 case Match_InvalidMemoryIndexed4SImm7:
5505 case Match_InvalidMemoryIndexed8SImm7:
5506 case Match_InvalidMemoryIndexed16SImm7:
5507 case Match_InvalidMemoryIndexed8UImm5:
5508 case Match_InvalidMemoryIndexed4UImm5:
5509 case Match_InvalidMemoryIndexed2UImm5:
5510 case Match_InvalidMemoryIndexed1UImm6:
5511 case Match_InvalidMemoryIndexed2UImm6:
5512 case Match_InvalidMemoryIndexed4UImm6:
5513 case Match_InvalidMemoryIndexed8UImm6:
5514 case Match_InvalidMemoryIndexed16UImm6:
5515 case Match_InvalidMemoryIndexedSImm6:
5516 case Match_InvalidMemoryIndexedSImm5:
5517 case Match_InvalidMemoryIndexedSImm8:
5518 case Match_InvalidMemoryIndexedSImm9:
5519 case Match_InvalidMemoryIndexed16SImm9:
5520 case Match_InvalidMemoryIndexed8SImm10:
5521 case Match_InvalidImm0_1:
5522 case Match_InvalidImm0_3:
5523 case Match_InvalidImm0_7:
5524 case Match_InvalidImm0_15:
5525 case Match_InvalidImm0_31:
5526 case Match_InvalidImm0_63:
5527 case Match_InvalidImm0_127:
5528 case Match_InvalidImm0_255:
5529 case Match_InvalidImm0_65535:
5530 case Match_InvalidImm1_8:
5531 case Match_InvalidImm1_16:
5532 case Match_InvalidImm1_32:
5533 case Match_InvalidImm1_64:
5534 case Match_InvalidSVEAddSubImm8:
5535 case Match_InvalidSVEAddSubImm16:
5536 case Match_InvalidSVEAddSubImm32:
5537 case Match_InvalidSVEAddSubImm64:
5538 case Match_InvalidSVECpyImm8:
5539 case Match_InvalidSVECpyImm16:
5540 case Match_InvalidSVECpyImm32:
5541 case Match_InvalidSVECpyImm64:
5542 case Match_InvalidIndexRange1_1:
5543 case Match_InvalidIndexRange0_15:
5544 case Match_InvalidIndexRange0_7:
5545 case Match_InvalidIndexRange0_3:
5546 case Match_InvalidIndexRange0_1:
5547 case Match_InvalidSVEIndexRange0_63:
5548 case Match_InvalidSVEIndexRange0_31:
5549 case Match_InvalidSVEIndexRange0_15:
5550 case Match_InvalidSVEIndexRange0_7:
5551 case Match_InvalidSVEIndexRange0_3:
5552 case Match_InvalidLabel:
5553 case Match_InvalidComplexRotationEven:
5554 case Match_InvalidComplexRotationOdd:
5555 case Match_InvalidGPR64shifted8:
5556 case Match_InvalidGPR64shifted16:
5557 case Match_InvalidGPR64shifted32:
5558 case Match_InvalidGPR64shifted64:
5559 case Match_InvalidGPR64shifted128:
5560 case Match_InvalidGPR64NoXZRshifted8:
5561 case Match_InvalidGPR64NoXZRshifted16:
5562 case Match_InvalidGPR64NoXZRshifted32:
5563 case Match_InvalidGPR64NoXZRshifted64:
5564 case Match_InvalidGPR64NoXZRshifted128:
5565 case Match_InvalidZPR32UXTW8:
5566 case Match_InvalidZPR32UXTW16:
5567 case Match_InvalidZPR32UXTW32:
5568 case Match_InvalidZPR32UXTW64:
5569 case Match_InvalidZPR32SXTW8:
5570 case Match_InvalidZPR32SXTW16:
5571 case Match_InvalidZPR32SXTW32:
5572 case Match_InvalidZPR32SXTW64:
5573 case Match_InvalidZPR64UXTW8:
5574 case Match_InvalidZPR64SXTW8:
5575 case Match_InvalidZPR64UXTW16:
5576 case Match_InvalidZPR64SXTW16:
5577 case Match_InvalidZPR64UXTW32:
5578 case Match_InvalidZPR64SXTW32:
5579 case Match_InvalidZPR64UXTW64:
5580 case Match_InvalidZPR64SXTW64:
5581 case Match_InvalidZPR32LSL8:
5582 case Match_InvalidZPR32LSL16:
5583 case Match_InvalidZPR32LSL32:
5584 case Match_InvalidZPR32LSL64:
5585 case Match_InvalidZPR64LSL8:
5586 case Match_InvalidZPR64LSL16:
5587 case Match_InvalidZPR64LSL32:
5588 case Match_InvalidZPR64LSL64:
5589 case Match_InvalidZPR0:
5590 case Match_InvalidZPR8:
5591 case Match_InvalidZPR16:
5592 case Match_InvalidZPR32:
5593 case Match_InvalidZPR64:
5594 case Match_InvalidZPR128:
5595 case Match_InvalidZPR_3b8:
5596 case Match_InvalidZPR_3b16:
5597 case Match_InvalidZPR_3b32:
5598 case Match_InvalidZPR_4b16:
5599 case Match_InvalidZPR_4b32:
5600 case Match_InvalidZPR_4b64:
5601 case Match_InvalidSVEPredicateAnyReg:
5602 case Match_InvalidSVEPattern:
5603 case Match_InvalidSVEPredicateBReg:
5604 case Match_InvalidSVEPredicateHReg:
5605 case Match_InvalidSVEPredicateSReg:
5606 case Match_InvalidSVEPredicateDReg:
5607 case Match_InvalidSVEPredicate3bAnyReg:
5608 case Match_InvalidSVEPredicate3bBReg:
5609 case Match_InvalidSVEPredicate3bHReg:
5610 case Match_InvalidSVEPredicate3bSReg:
5611 case Match_InvalidSVEPredicate3bDReg:
5612 case Match_InvalidSVEExactFPImmOperandHalfOne:
5613 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5614 case Match_InvalidSVEExactFPImmOperandZeroOne:
5615 case Match_InvalidMatrixTile32:
5616 case Match_InvalidMatrixTile64:
5617 case Match_InvalidMatrix:
5618 case Match_InvalidMatrixTileVectorH8:
5619 case Match_InvalidMatrixTileVectorH16:
5620 case Match_InvalidMatrixTileVectorH32:
5621 case Match_InvalidMatrixTileVectorH64:
5622 case Match_InvalidMatrixTileVectorH128:
5623 case Match_InvalidMatrixTileVectorV8:
5624 case Match_InvalidMatrixTileVectorV16:
5625 case Match_InvalidMatrixTileVectorV32:
5626 case Match_InvalidMatrixTileVectorV64:
5627 case Match_InvalidMatrixTileVectorV128:
5628 case Match_InvalidSVCR:
5629 case Match_InvalidMatrixIndexGPR32_12_15:
5630 case Match_MSR:
5631 case Match_MRS: {
5632 if (ErrorInfo >= Operands.size())
5633 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5634 // Any time we get here, there's nothing fancy to do. Just get the
5635 // operand SMLoc and display the diagnostic.
5636 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5637 if (ErrorLoc == SMLoc())
5638 ErrorLoc = IDLoc;
5639 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5640 }
5641 }
5642
5643 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5643)
;
5644}
5645
5646/// ParseDirective parses the arm specific directives
5647bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5648 const MCContext::Environment Format = getContext().getObjectFileType();
5649 bool IsMachO = Format == MCContext::IsMachO;
1
Assuming 'Format' is not equal to IsMachO
5650 bool IsCOFF = Format == MCContext::IsCOFF;
2
Assuming 'Format' is equal to IsCOFF
5651
5652 auto IDVal = DirectiveID.getIdentifier().lower();
5653 SMLoc Loc = DirectiveID.getLoc();
5654 if (IDVal == ".arch")
3
Taking false branch
5655 parseDirectiveArch(Loc);
5656 else if (IDVal == ".cpu")
4
Taking false branch
5657 parseDirectiveCPU(Loc);
5658 else if (IDVal == ".tlsdesccall")
5
Taking false branch
5659 parseDirectiveTLSDescCall(Loc);
5660 else if (IDVal == ".ltorg" || IDVal == ".pool")
6
Taking false branch
5661 parseDirectiveLtorg(Loc);
5662 else if (IDVal == ".unreq")
7
Taking false branch
5663 parseDirectiveUnreq(Loc);
5664 else if (IDVal == ".inst")
8
Taking false branch
5665 parseDirectiveInst(Loc);
5666 else if (IDVal == ".cfi_negate_ra_state")
9
Taking false branch
5667 parseDirectiveCFINegateRAState();
5668 else if (IDVal == ".cfi_b_key_frame")
10
Taking false branch
5669 parseDirectiveCFIBKeyFrame();
5670 else if (IDVal == ".arch_extension")
11
Taking false branch
5671 parseDirectiveArchExtension(Loc);
5672 else if (IDVal == ".variant_pcs")
12
Taking false branch
5673 parseDirectiveVariantPCS(Loc);
5674 else if (IsMachO
12.1
'IsMachO' is false
) {
13
Taking false branch
5675 if (IDVal == MCLOHDirectiveName())
5676 parseDirectiveLOH(IDVal, Loc);
5677 else
5678 return true;
5679 } else if (IsCOFF
13.1
'IsCOFF' is true
) {
14
Taking true branch
5680 if (IDVal == ".seh_stackalloc")
15
Taking false branch
5681 parseDirectiveSEHAllocStack(Loc);
5682 else if (IDVal == ".seh_endprologue")
16
Taking false branch
5683 parseDirectiveSEHPrologEnd(Loc);
5684 else if (IDVal == ".seh_save_r19r20_x")
17
Taking false branch
5685 parseDirectiveSEHSaveR19R20X(Loc);
5686 else if (IDVal == ".seh_save_fplr")
18
Taking false branch
5687 parseDirectiveSEHSaveFPLR(Loc);
5688 else if (IDVal == ".seh_save_fplr_x")
19
Taking false branch
5689 parseDirectiveSEHSaveFPLRX(Loc);
5690 else if (IDVal == ".seh_save_reg")
20
Taking false branch
5691 parseDirectiveSEHSaveReg(Loc);
5692 else if (IDVal == ".seh_save_reg_x")
21
Taking false branch
5693 parseDirectiveSEHSaveRegX(Loc);
5694 else if (IDVal == ".seh_save_regp")
22
Taking true branch
5695 parseDirectiveSEHSaveRegP(Loc);
23
Calling 'AArch64AsmParser::parseDirectiveSEHSaveRegP'
5696 else if (IDVal == ".seh_save_regp_x")
5697 parseDirectiveSEHSaveRegPX(Loc);
5698 else if (IDVal == ".seh_save_lrpair")
5699 parseDirectiveSEHSaveLRPair(Loc);
5700 else if (IDVal == ".seh_save_freg")
5701 parseDirectiveSEHSaveFReg(Loc);
5702 else if (IDVal == ".seh_save_freg_x")
5703 parseDirectiveSEHSaveFRegX(Loc);
5704 else if (IDVal == ".seh_save_fregp")
5705 parseDirectiveSEHSaveFRegP(Loc);
5706 else if (IDVal == ".seh_save_fregp_x")
5707 parseDirectiveSEHSaveFRegPX(Loc);
5708 else if (IDVal == ".seh_set_fp")
5709 parseDirectiveSEHSetFP(Loc);
5710 else if (IDVal == ".seh_add_fp")
5711 parseDirectiveSEHAddFP(Loc);
5712 else if (IDVal == ".seh_nop")
5713 parseDirectiveSEHNop(Loc);
5714 else if (IDVal == ".seh_save_next")
5715 parseDirectiveSEHSaveNext(Loc);
5716 else if (IDVal == ".seh_startepilogue")
5717 parseDirectiveSEHEpilogStart(Loc);