Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 4268, column 15
The left operand of '==' is a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/lib/Target/AArch64/AsmParser -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/lib/Target/AArch64/AsmParser/.. 
-I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/.. -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-07-23-023125-8721-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
<
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64InstPrinter.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "TargetInfo/AArch64TargetInfo.h"
15#include "AArch64InstrInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringExtras.h"
23#include "llvm/ADT/StringMap.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/MC/MCContext.h"
28#include "llvm/MC/MCExpr.h"
29#include "llvm/MC/MCInst.h"
30#include "llvm/MC/MCLinkerOptimizationHint.h"
31#include "llvm/MC/MCObjectFileInfo.h"
32#include "llvm/MC/MCParser/MCAsmLexer.h"
33#include "llvm/MC/MCParser/MCAsmParser.h"
34#include "llvm/MC/MCParser/MCAsmParserExtension.h"
35#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
36#include "llvm/MC/MCParser/MCTargetAsmParser.h"
37#include "llvm/MC/MCRegisterInfo.h"
38#include "llvm/MC/MCStreamer.h"
39#include "llvm/MC/MCSubtargetInfo.h"
40#include "llvm/MC/MCSymbol.h"
41#include "llvm/MC/MCTargetOptions.h"
42#include "llvm/MC/SubtargetFeature.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/Compiler.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/MathExtras.h"
48#include "llvm/Support/SMLoc.h"
49#include "llvm/Support/TargetParser.h"
50#include "llvm/Support/TargetRegistry.h"
51#include "llvm/Support/raw_ostream.h"
52#include <cassert>
53#include <cctype>
54#include <cstdint>
55#include <cstdio>
56#include <string>
57#include <tuple>
58#include <utility>
59#include <vector>
60
61using namespace llvm;
62
63namespace {
64
65enum class RegKind {
66 Scalar,
67 NeonVector,
68 SVEDataVector,
69 SVEPredicateVector,
70 Matrix
71};
72
73enum class MatrixKind { Array, Tile, Row, Col };
74
75enum RegConstraintEqualityTy {
76 EqualsReg,
77 EqualsSuperReg,
78 EqualsSubReg
79};
80
81class AArch64AsmParser : public MCTargetAsmParser {
82private:
83 StringRef Mnemonic; ///< Instruction mnemonic.
84
85 // Map of register aliases registers via the .req directive.
86 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
87
88 class PrefixInfo {
89 public:
90 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
91 PrefixInfo Prefix;
92 switch (Inst.getOpcode()) {
93 case AArch64::MOVPRFX_ZZ:
94 Prefix.Active = true;
95 Prefix.Dst = Inst.getOperand(0).getReg();
96 break;
97 case AArch64::MOVPRFX_ZPmZ_B:
98 case AArch64::MOVPRFX_ZPmZ_H:
99 case AArch64::MOVPRFX_ZPmZ_S:
100 case AArch64::MOVPRFX_ZPmZ_D:
101 Prefix.Active = true;
102 Prefix.Predicated = true;
103 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
104 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 105, __extension__ __PRETTY_FUNCTION__))
105 "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 105, __extension__ __PRETTY_FUNCTION__))
;
106 Prefix.Dst = Inst.getOperand(0).getReg();
107 Prefix.Pg = Inst.getOperand(2).getReg();
108 break;
109 case AArch64::MOVPRFX_ZPzZ_B:
110 case AArch64::MOVPRFX_ZPzZ_H:
111 case AArch64::MOVPRFX_ZPzZ_S:
112 case AArch64::MOVPRFX_ZPzZ_D:
113 Prefix.Active = true;
114 Prefix.Predicated = true;
115 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
116 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 117, __extension__ __PRETTY_FUNCTION__))
117 "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 117, __extension__ __PRETTY_FUNCTION__))
;
118 Prefix.Dst = Inst.getOperand(0).getReg();
119 Prefix.Pg = Inst.getOperand(1).getReg();
120 break;
121 default:
122 break;
123 }
124
125 return Prefix;
126 }
127
128 PrefixInfo() : Active(false), Predicated(false) {}
129 bool isActive() const { return Active; }
130 bool isPredicated() const { return Predicated; }
131 unsigned getElementSize() const {
132 assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail
("Predicated", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 132, __extension__ __PRETTY_FUNCTION__))
;
133 return ElementSize;
134 }
135 unsigned getDstReg() const { return Dst; }
136 unsigned getPgReg() const {
137 assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail
("Predicated", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 137, __extension__ __PRETTY_FUNCTION__))
;
138 return Pg;
139 }
140
141 private:
142 bool Active;
143 bool Predicated;
144 unsigned ElementSize;
145 unsigned Dst;
146 unsigned Pg;
147 } NextPrefix;
148
149 AArch64TargetStreamer &getTargetStreamer() {
150 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
151 return static_cast<AArch64TargetStreamer &>(TS);
152 }
153
154 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
155
156 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
157 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
158 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
159 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
160 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
161 bool parseRegister(OperandVector &Operands);
162 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
163 bool parseNeonVectorList(OperandVector &Operands);
164 bool parseOptionalMulOperand(OperandVector &Operands);
165 bool parseKeywordOperand(OperandVector &Operands);
166 bool parseOperand(OperandVector &Operands, bool isCondCode,
167 bool invertCondCode);
168 bool parseImmExpr(int64_t &Out);
169 bool parseComma();
170 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
171 unsigned Last);
172
173 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
174 OperandVector &Operands);
175
176 bool parseDirectiveArch(SMLoc L);
177 bool parseDirectiveArchExtension(SMLoc L);
178 bool parseDirectiveCPU(SMLoc L);
179 bool parseDirectiveInst(SMLoc L);
180
181 bool parseDirectiveTLSDescCall(SMLoc L);
182
183 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
184 bool parseDirectiveLtorg(SMLoc L);
185
186 bool parseDirectiveReq(StringRef Name, SMLoc L);
187 bool parseDirectiveUnreq(SMLoc L);
188 bool parseDirectiveCFINegateRAState();
189 bool parseDirectiveCFIBKeyFrame();
190
191 bool parseDirectiveVariantPCS(SMLoc L);
192
193 bool parseDirectiveSEHAllocStack(SMLoc L);
194 bool parseDirectiveSEHPrologEnd(SMLoc L);
195 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
196 bool parseDirectiveSEHSaveFPLR(SMLoc L);
197 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
198 bool parseDirectiveSEHSaveReg(SMLoc L);
199 bool parseDirectiveSEHSaveRegX(SMLoc L);
200 bool parseDirectiveSEHSaveRegP(SMLoc L);
201 bool parseDirectiveSEHSaveRegPX(SMLoc L);
202 bool parseDirectiveSEHSaveLRPair(SMLoc L);
203 bool parseDirectiveSEHSaveFReg(SMLoc L);
204 bool parseDirectiveSEHSaveFRegX(SMLoc L);
205 bool parseDirectiveSEHSaveFRegP(SMLoc L);
206 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
207 bool parseDirectiveSEHSetFP(SMLoc L);
208 bool parseDirectiveSEHAddFP(SMLoc L);
209 bool parseDirectiveSEHNop(SMLoc L);
210 bool parseDirectiveSEHSaveNext(SMLoc L);
211 bool parseDirectiveSEHEpilogStart(SMLoc L);
212 bool parseDirectiveSEHEpilogEnd(SMLoc L);
213 bool parseDirectiveSEHTrapFrame(SMLoc L);
214 bool parseDirectiveSEHMachineFrame(SMLoc L);
215 bool parseDirectiveSEHContext(SMLoc L);
216 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
217
218 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
219 SmallVectorImpl<SMLoc> &Loc);
220 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
221 OperandVector &Operands, MCStreamer &Out,
222 uint64_t &ErrorInfo,
223 bool MatchingInlineAsm) override;
224/// @name Auto-generated Match Functions
225/// {
226
227#define GET_ASSEMBLER_HEADER
228#include "AArch64GenAsmMatcher.inc"
229
230 /// }
231
232 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
233 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
234 RegKind MatchKind);
235 OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
236 OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
237 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
238 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
239 OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
240 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
241 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
242 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
243 template <bool IsSVEPrefetch = false>
244 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
245 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
246 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
247 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
248 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
249 template<bool AddFPZeroAsLiteral>
250 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
251 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
252 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
253 bool tryParseNeonVectorRegister(OperandVector &Operands);
254 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
255 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
256 template <bool ParseShiftExtend,
257 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
258 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
259 template <bool ParseShiftExtend, bool ParseSuffix>
260 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
261 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
262 template <RegKind VectorKind>
263 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
264 bool ExpectMatch = false);
265 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
266 OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
267
268public:
269 enum AArch64MatchResultTy {
270 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
271#define GET_OPERAND_DIAGNOSTIC_TYPES
272#include "AArch64GenAsmMatcher.inc"
273 };
274 bool IsILP32;
275
276 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
277 const MCInstrInfo &MII, const MCTargetOptions &Options)
278 : MCTargetAsmParser(Options, STI, MII) {
279 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
280 MCAsmParserExtension::Initialize(Parser);
281 MCStreamer &S = getParser().getStreamer();
282 if (S.getTargetStreamer() == nullptr)
283 new AArch64TargetStreamer(S);
284
285 // Alias .hword/.word/.[dx]word to the target-independent
286 // .2byte/.4byte/.8byte directives as they have the same form and
287 // semantics:
288 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
289 Parser.addAliasForDirective(".hword", ".2byte");
290 Parser.addAliasForDirective(".word", ".4byte");
291 Parser.addAliasForDirective(".dword", ".8byte");
292 Parser.addAliasForDirective(".xword", ".8byte");
293
294 // Initialize the set of available features.
295 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
296 }
297
298 bool regsEqual(const MCParsedAsmOperand &Op1,
299 const MCParsedAsmOperand &Op2) const override;
300 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
301 SMLoc NameLoc, OperandVector &Operands) override;
302 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
303 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
304 SMLoc &EndLoc) override;
305 bool ParseDirective(AsmToken DirectiveID) override;
306 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
307 unsigned Kind) override;
308
309 static bool classifySymbolRef(const MCExpr *Expr,
310 AArch64MCExpr::VariantKind &ELFRefKind,
311 MCSymbolRefExpr::VariantKind &DarwinRefKind,
312 int64_t &Addend);
313};
314
315/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
316/// instruction.
317class AArch64Operand : public MCParsedAsmOperand {
318private:
319 enum KindTy {
320 k_Immediate,
321 k_ShiftedImm,
322 k_CondCode,
323 k_Register,
324 k_MatrixRegister,
325 k_SVCR,
326 k_VectorList,
327 k_VectorIndex,
328 k_Token,
329 k_SysReg,
330 k_SysCR,
331 k_Prefetch,
332 k_ShiftExtend,
333 k_FPImm,
334 k_Barrier,
335 k_PSBHint,
336 k_BTIHint,
337 } Kind;
338
339 SMLoc StartLoc, EndLoc;
340
341 struct TokOp {
342 const char *Data;
343 unsigned Length;
344 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
345 };
346
347 // Separate shift/extend operand.
348 struct ShiftExtendOp {
349 AArch64_AM::ShiftExtendType Type;
350 unsigned Amount;
351 bool HasExplicitAmount;
352 };
353
354 struct RegOp {
355 unsigned RegNum;
356 RegKind Kind;
357 int ElementWidth;
358
359 // The register may be allowed as a different register class,
360 // e.g. for GPR64as32 or GPR32as64.
361 RegConstraintEqualityTy EqualityTy;
362
363 // In some cases the shift/extend needs to be explicitly parsed together
364 // with the register, rather than as a separate operand. This is needed
365 // for addressing modes where the instruction as a whole dictates the
366 // scaling/extend, rather than specific bits in the instruction.
367 // By parsing them as a single operand, we avoid the need to pass an
368 // extra operand in all CodeGen patterns (because all operands need to
369 // have an associated value), and we avoid the need to update TableGen to
370 // accept operands that have no associated bits in the instruction.
371 //
372 // An added benefit of parsing them together is that the assembler
373 // can give a sensible diagnostic if the scaling is not correct.
374 //
375 // The default is 'lsl #0' (HasExplicitAmount = false) if no
376 // ShiftExtend is specified.
377 ShiftExtendOp ShiftExtend;
378 };
379
380 struct MatrixRegOp {
381 unsigned RegNum;
382 unsigned ElementWidth;
383 MatrixKind Kind;
384 };
385
386 struct VectorListOp {
387 unsigned RegNum;
388 unsigned Count;
389 unsigned NumElements;
390 unsigned ElementWidth;
391 RegKind RegisterKind;
392 };
393
394 struct VectorIndexOp {
395 int Val;
396 };
397
398 struct ImmOp {
399 const MCExpr *Val;
400 };
401
402 struct ShiftedImmOp {
403 const MCExpr *Val;
404 unsigned ShiftAmount;
405 };
406
407 struct CondCodeOp {
408 AArch64CC::CondCode Code;
409 };
410
411 struct FPImmOp {
412 uint64_t Val; // APFloat value bitcasted to uint64_t.
413 bool IsExact; // describes whether parsed value was exact.
414 };
415
416 struct BarrierOp {
417 const char *Data;
418 unsigned Length;
419 unsigned Val; // Not the enum since not all values have names.
420 bool HasnXSModifier;
421 };
422
423 struct SysRegOp {
424 const char *Data;
425 unsigned Length;
426 uint32_t MRSReg;
427 uint32_t MSRReg;
428 uint32_t PStateField;
429 };
430
431 struct SysCRImmOp {
432 unsigned Val;
433 };
434
435 struct PrefetchOp {
436 const char *Data;
437 unsigned Length;
438 unsigned Val;
439 };
440
441 struct PSBHintOp {
442 const char *Data;
443 unsigned Length;
444 unsigned Val;
445 };
446
447 struct BTIHintOp {
448 const char *Data;
449 unsigned Length;
450 unsigned Val;
451 };
452
453 struct SVCROp {
454 const char *Data;
455 unsigned Length;
456 unsigned PStateField;
457 };
458
459 union {
460 struct TokOp Tok;
461 struct RegOp Reg;
462 struct MatrixRegOp MatrixReg;
463 struct VectorListOp VectorList;
464 struct VectorIndexOp VectorIndex;
465 struct ImmOp Imm;
466 struct ShiftedImmOp ShiftedImm;
467 struct CondCodeOp CondCode;
468 struct FPImmOp FPImm;
469 struct BarrierOp Barrier;
470 struct SysRegOp SysReg;
471 struct SysCRImmOp SysCRImm;
472 struct PrefetchOp Prefetch;
473 struct PSBHintOp PSBHint;
474 struct BTIHintOp BTIHint;
475 struct ShiftExtendOp ShiftExtend;
476 struct SVCROp SVCR;
477 };
478
479 // Keep the MCContext around as the MCExprs may need manipulated during
480 // the add<>Operands() calls.
481 MCContext &Ctx;
482
483public:
484 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
485
486 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
487 Kind = o.Kind;
488 StartLoc = o.StartLoc;
489 EndLoc = o.EndLoc;
490 switch (Kind) {
491 case k_Token:
492 Tok = o.Tok;
493 break;
494 case k_Immediate:
495 Imm = o.Imm;
496 break;
497 case k_ShiftedImm:
498 ShiftedImm = o.ShiftedImm;
499 break;
500 case k_CondCode:
501 CondCode = o.CondCode;
502 break;
503 case k_FPImm:
504 FPImm = o.FPImm;
505 break;
506 case k_Barrier:
507 Barrier = o.Barrier;
508 break;
509 case k_Register:
510 Reg = o.Reg;
511 break;
512 case k_MatrixRegister:
513 MatrixReg = o.MatrixReg;
514 break;
515 case k_VectorList:
516 VectorList = o.VectorList;
517 break;
518 case k_VectorIndex:
519 VectorIndex = o.VectorIndex;
520 break;
521 case k_SysReg:
522 SysReg = o.SysReg;
523 break;
524 case k_SysCR:
525 SysCRImm = o.SysCRImm;
526 break;
527 case k_Prefetch:
528 Prefetch = o.Prefetch;
529 break;
530 case k_PSBHint:
531 PSBHint = o.PSBHint;
532 break;
533 case k_BTIHint:
534 BTIHint = o.BTIHint;
535 break;
536 case k_ShiftExtend:
537 ShiftExtend = o.ShiftExtend;
538 break;
539 case k_SVCR:
540 SVCR = o.SVCR;
541 break;
542 }
543 }
544
545 /// getStartLoc - Get the location of the first token of this operand.
546 SMLoc getStartLoc() const override { return StartLoc; }
547 /// getEndLoc - Get the location of the last token of this operand.
548 SMLoc getEndLoc() const override { return EndLoc; }
549
550 StringRef getToken() const {
551 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 551, __extension__ __PRETTY_FUNCTION__))
;
552 return StringRef(Tok.Data, Tok.Length);
553 }
554
555 bool isTokenSuffix() const {
556 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 556, __extension__ __PRETTY_FUNCTION__))
;
557 return Tok.IsSuffix;
558 }
559
560 const MCExpr *getImm() const {
561 assert(Kind == k_Immediate && "Invalid access!")(static_cast <bool> (Kind == k_Immediate && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 561, __extension__ __PRETTY_FUNCTION__))
;
562 return Imm.Val;
563 }
564
565 const MCExpr *getShiftedImmVal() const {
566 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 566, __extension__ __PRETTY_FUNCTION__))
;
567 return ShiftedImm.Val;
568 }
569
570 unsigned getShiftedImmShift() const {
571 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 571, __extension__ __PRETTY_FUNCTION__))
;
572 return ShiftedImm.ShiftAmount;
573 }
574
575 AArch64CC::CondCode getCondCode() const {
576 assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 576, __extension__ __PRETTY_FUNCTION__))
;
577 return CondCode.Code;
578 }
579
580 APFloat getFPImm() const {
581 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 581, __extension__ __PRETTY_FUNCTION__))
;
582 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
583 }
584
585 bool getFPImmIsExact() const {
586 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 586, __extension__ __PRETTY_FUNCTION__))
;
587 return FPImm.IsExact;
588 }
589
590 unsigned getBarrier() const {
591 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 591, __extension__ __PRETTY_FUNCTION__))
;
592 return Barrier.Val;
593 }
594
595 StringRef getBarrierName() const {
596 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 596, __extension__ __PRETTY_FUNCTION__))
;
597 return StringRef(Barrier.Data, Barrier.Length);
598 }
599
600 bool getBarriernXSModifier() const {
601 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 601, __extension__ __PRETTY_FUNCTION__))
;
602 return Barrier.HasnXSModifier;
603 }
604
605 unsigned getReg() const override {
606 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 606, __extension__ __PRETTY_FUNCTION__))
;
607 return Reg.RegNum;
608 }
609
610 unsigned getMatrixReg() const {
611 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 611, __extension__ __PRETTY_FUNCTION__))
;
612 return MatrixReg.RegNum;
613 }
614
615 unsigned getMatrixElementWidth() const {
616 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 616, __extension__ __PRETTY_FUNCTION__))
;
617 return MatrixReg.ElementWidth;
618 }
619
620 MatrixKind getMatrixKind() const {
621 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 621, __extension__ __PRETTY_FUNCTION__))
;
622 return MatrixReg.Kind;
623 }
624
625 RegConstraintEqualityTy getRegEqualityTy() const {
626 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 626, __extension__ __PRETTY_FUNCTION__))
;
627 return Reg.EqualityTy;
628 }
629
630 unsigned getVectorListStart() const {
631 assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 631, __extension__ __PRETTY_FUNCTION__))
;
632 return VectorList.RegNum;
633 }
634
635 unsigned getVectorListCount() const {
636 assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 636, __extension__ __PRETTY_FUNCTION__))
;
637 return VectorList.Count;
638 }
639
640 int getVectorIndex() const {
641 assert(Kind == k_VectorIndex && "Invalid access!")(static_cast <bool> (Kind == k_VectorIndex && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 641, __extension__ __PRETTY_FUNCTION__))
;
642 return VectorIndex.Val;
643 }
644
// Returns the textual name of a system-register operand as a non-owning
// StringRef over SysReg's stored data. Valid only when Kind == k_SysReg.
645 StringRef getSysReg() const {
646 assert(Kind == k_SysReg && "Invalid access!")(static_cast <bool> (Kind == k_SysReg && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 646, __extension__ __PRETTY_FUNCTION__))
;
647 return StringRef(SysReg.Data, SysReg.Length);
648 }
649
// Returns the immediate of a system control-register operand (Cn/Cm field).
// Valid only when Kind == k_SysCR (asserted below).
650 unsigned getSysCR() const {
651 assert(Kind == k_SysCR && "Invalid access!")(static_cast <bool> (Kind == k_SysCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 651, __extension__ __PRETTY_FUNCTION__))
;
652 return SysCRImm.Val;
653 }
654
// Returns the numeric value of a prefetch-hint operand (PRFM).
// Valid only when Kind == k_Prefetch (asserted below).
655 unsigned getPrefetch() const {
656 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 656, __extension__ __PRETTY_FUNCTION__))
;
657 return Prefetch.Val;
658 }
659
// Returns the numeric value of a PSB hint operand.
// Valid only when Kind == k_PSBHint (asserted below).
660 unsigned getPSBHint() const {
661 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 661, __extension__ __PRETTY_FUNCTION__))
;
662 return PSBHint.Val;
663 }
664
// Returns the textual name of a PSB hint operand as a non-owning StringRef.
// Valid only when Kind == k_PSBHint (asserted below).
665 StringRef getPSBHintName() const {
666 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 666, __extension__ __PRETTY_FUNCTION__))
;
667 return StringRef(PSBHint.Data, PSBHint.Length);
668 }
669
// Returns the numeric value of a BTI hint operand.
// Valid only when Kind == k_BTIHint (asserted below).
670 unsigned getBTIHint() const {
671 assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 671, __extension__ __PRETTY_FUNCTION__))
;
672 return BTIHint.Val;
673 }
674
// Returns the textual name of a BTI hint operand as a non-owning StringRef.
// Valid only when Kind == k_BTIHint (asserted below).
675 StringRef getBTIHintName() const {
676 assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 676, __extension__ __PRETTY_FUNCTION__))
;
677 return StringRef(BTIHint.Data, BTIHint.Length);
678 }
679
// Returns the textual name of an SVCR (SME streaming-control) operand.
// Valid only when Kind == k_SVCR (asserted below).
680 StringRef getSVCR() const {
681 assert(Kind == k_SVCR && "Invalid access!")(static_cast <bool> (Kind == k_SVCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SVCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 681, __extension__ __PRETTY_FUNCTION__))
;
682 return StringRef(SVCR.Data, SVCR.Length);
683 }
684
// Returns the textual name of a prefetch-hint operand as a non-owning
// StringRef. Valid only when Kind == k_Prefetch (asserted below).
685 StringRef getPrefetchName() const {
686 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 686, __extension__ __PRETTY_FUNCTION__))
;
687 return StringRef(Prefetch.Data, Prefetch.Length);
688 }
689
// Returns the shift/extend type. Works for both a standalone shift/extend
// operand (k_ShiftExtend) and a register operand carrying an attached
// shift/extend (k_Register); any other kind is a programming error.
690 AArch64_AM::ShiftExtendType getShiftExtendType() const {
691 if (Kind == k_ShiftExtend)
692 return ShiftExtend.Type;
693 if (Kind == k_Register)
694 return Reg.ShiftExtend.Type;
695 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 695)
;
696 }
697
// Returns the shift/extend amount. Accepts k_ShiftExtend or k_Register
// (with attached shift/extend); any other kind is a programming error.
698 unsigned getShiftExtendAmount() const {
699 if (Kind == k_ShiftExtend)
700 return ShiftExtend.Amount;
701 if (Kind == k_Register)
702 return Reg.ShiftExtend.Amount;
703 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 703)
;
704 }
705
// True if the user explicitly wrote a shift/extend amount (as opposed to it
// being implied). Accepts k_ShiftExtend or k_Register; otherwise unreachable.
706 bool hasShiftExtendAmount() const {
707 if (Kind == k_ShiftExtend)
708 return ShiftExtend.HasExplicitAmount;
709 if (Kind == k_Register)
710 return Reg.ShiftExtend.HasExplicitAmount;
711 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 711)
;
712 }
713
// Kind predicates from the MCParsedAsmOperand interface. This operand class
// never represents a memory operand directly, hence isMem() is always false.
714 bool isImm() const override { return Kind == k_Immediate; }
715 bool isMem() const override { return false; }
716
// True if the operand is a constant immediate in [0, 64) — an unsigned
// 6-bit immediate. Non-constant expressions do not match.
717 bool isUImm6() const {
718 if (!isImm())
719 return false;
720 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
721 if (!MCE)
722 return false;
723 int64_t Val = MCE->getValue();
724 return (Val >= 0 && Val < 64);
725 }
726
// Signed Width-bit immediate check; delegates to isSImmScaled with scale 1.
727 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
728
// Signed Bits-bit immediate that must be a multiple of Scale.
729 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
730 return isImmScaled<Bits, Scale>(true);
731 }
732
// Unsigned Bits-bit immediate that must be a multiple of Scale.
733 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
734 return isImmScaled<Bits, Scale>(false);
735 }
736
// Shared implementation for isSImmScaled/isUImmScaled: checks that the
// operand is a constant immediate in the (signed or unsigned) Bits-bit range
// scaled by Scale, and that it is an exact multiple of Scale.
// NoMatch: not a constant immediate at all. NearMatch: constant but out of
// range or misaligned (lets the matcher emit a targeted diagnostic).
737 template <int Bits, int Scale>
738 DiagnosticPredicate isImmScaled(bool Signed) const {
739 if (!isImm())
740 return DiagnosticPredicateTy::NoMatch;
741
742 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
743 if (!MCE)
744 return DiagnosticPredicateTy::NoMatch;
745
746 int64_t MinVal, MaxVal;
747 if (Signed) {
748 int64_t Shift = Bits - 1;
749 MinVal = (int64_t(1) << Shift) * -Scale;
750 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
751 } else {
752 MinVal = 0;
753 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
754 }
755
756 int64_t Val = MCE->getValue();
757 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
758 return DiagnosticPredicateTy::Match;
759
760 return DiagnosticPredicateTy::NearMatch;
761 }
762
// SVE predicate-pattern immediate: a constant in [0, 32). Non-constant
// expressions are NoMatch; out-of-range constants are NearMatch.
763 DiagnosticPredicate isSVEPattern() const {
764 if (!isImm())
765 return DiagnosticPredicateTy::NoMatch;
766 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
767 if (!MCE)
768 return DiagnosticPredicateTy::NoMatch;
769 int64_t Val = MCE->getValue();
770 if (Val >= 0 && Val < 32)
771 return DiagnosticPredicateTy::Match;
772 return DiagnosticPredicateTy::NearMatch;
773 }
774
// True if a symbolic expression is usable as a uimm12 (scaled) offset:
// either we cannot classify it (optimistically accepted; the fixup and
// relocation machinery validates later), or it uses one of the low-12-bit
// ELF/Mach-O reference kinds listed below.
775 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
776 AArch64MCExpr::VariantKind ELFRefKind;
777 MCSymbolRefExpr::VariantKind DarwinRefKind;
778 int64_t Addend;
779 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
780 Addend)) {
781 // If we don't understand the expression, assume the best and
782 // let the fixup and relocation code deal with it.
783 return true;
784 }
785
786 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
787 ELFRefKind == AArch64MCExpr::VK_LO12 ||
788 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
789 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
790 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
791 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
792 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
793 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
794 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
795 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
796 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
797 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
798 // Note that we don't range-check the addend. It's adjusted modulo page
799 // size when converted, so there is no "out of range" condition when using
800 // @pageoff.
801 return true;
802 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
803 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
804 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
805 return Addend == 0;
806 }
807
808 return false;
809 }
810
// Unsigned 12-bit scaled load/store offset: constant immediates must be a
// non-negative multiple of Scale with (Val / Scale) < 0x1000; symbolic
// expressions are deferred to isSymbolicUImm12Offset.
811 template <int Scale> bool isUImm12Offset() const {
812 if (!isImm())
813 return false;
814
815 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
816 if (!MCE)
817 return isSymbolicUImm12Offset(getImm());
818
819 int64_t Val = MCE->getValue();
820 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
821 }
822
// Constant immediate in the inclusive range [N, M]; non-constant
// expressions do not match.
823 template <int N, int M>
824 bool isImmInRange() const {
825 if (!isImm())
826 return false;
827 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
828 if (!MCE)
829 return false;
830 int64_t Val = MCE->getValue();
831 return (Val >= N && Val <= M);
832 }
833
834 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
835 // a logical immediate can always be represented when inverted.
// Checks whether the constant is encodable as an AArch64 logical immediate
// for an operation of width sizeof(T)*8 bits. The upper bits beyond that
// width must be either all-zero or all-one (the latter permits bitwise-NOT
// forms); they are masked off before the encodability check.
836 template <typename T>
837 bool isLogicalImm() const {
838 if (!isImm())
839 return false;
840 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
841 if (!MCE)
842 return false;
843
844 int64_t Val = MCE->getValue();
845 // Avoid left shift by 64 directly.
// (Shifting twice by sizeof(T)*4 builds the "upper bits" mask without UB
// when sizeof(T) == 8.)
846 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
847 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
848 if ((Val & Upper) && (Val & Upper) != Upper)
849 return false;
850
851 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
852 }
853
// True if this operand is an (immediate, shift-amount) pair.
854 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
855
856 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
857 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
858 /// immediate that can be shifted by 'Shift'.
/// Returns None when the operand is neither form or its value is not a
/// constant expression.
859 template <unsigned Width>
860 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
861 if (isShiftedImm() && Width == getShiftedImmShift())
862 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
863 return std::make_pair(CE->getValue(), Width);
864
865 if (isImm())
866 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
867 int64_t Val = CE->getValue();
// If the low Width bits are clear, report the value pre-shifted by Width;
// otherwise report it unshifted.
868 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
869 return std::make_pair(Val >> Width, Width);
870 else
871 return std::make_pair(Val, 0u);
872 }
873
874 return {};
875 }
876
// True if the operand is usable as an ADD/SUB immediate: a (possibly
// 'lsl #12'-shifted) constant in [0, 0xfff], one of the recognised
// low-12-bit symbol reference kinds, or an unclassifiable expression that
// is optimistically deferred to the fixup/relocation code.
877 bool isAddSubImm() const {
878 if (!isShiftedImm() && !isImm())
879 return false;
880
881 const MCExpr *Expr;
882
883 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
884 if (isShiftedImm()) {
885 unsigned Shift = ShiftedImm.ShiftAmount;
886 Expr = ShiftedImm.Val;
887 if (Shift != 0 && Shift != 12)
888 return false;
889 } else {
890 Expr = getImm();
891 }
892
893 AArch64MCExpr::VariantKind ELFRefKind;
894 MCSymbolRefExpr::VariantKind DarwinRefKind;
895 int64_t Addend;
896 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
897 DarwinRefKind, Addend)) {
898 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
899 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
900 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
901 || ELFRefKind == AArch64MCExpr::VK_LO12
902 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
903 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
904 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
905 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
906 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
907 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
908 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
909 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
910 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
911 }
912
913 // If it's a constant, it should be a real immediate in range.
914 if (auto ShiftedVal = getShiftedVal<12>())
915 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
916
917 // If it's an expression, we hope for the best and let the fixup/relocation
918 // code deal with it.
919 return true;
920 }
921
// True if the operand is a negative constant whose magnitude fits an
// ADD/SUB immediate (so the matcher can flip ADD<->SUB). Unlike isAddSubImm,
// symbolic expressions never match here.
922 bool isAddSubImmNeg() const {
923 if (!isShiftedImm() && !isImm())
924 return false;
925
926 // Otherwise it should be a real negative immediate in range.
927 if (auto ShiftedVal = getShiftedVal<12>())
928 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
929
930 return false;
931 }
932
933 // Signed value in the range -128 to +127. For element widths of
934 // 16 bits or higher it may also be a signed multiple of 256 in the
935 // range -32768 to +32512.
936 // For element-width of 8 bits a range of -128 to 255 is accepted,
937 // since a copy of a byte can be either signed/unsigned.
// Returns Match/NearMatch/NoMatch per the DiagnosticPredicate convention;
// a byte element never accepts the 'lsl #8' shifted form.
938 template <typename T>
939 DiagnosticPredicate isSVECpyImm() const {
940 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
941 return DiagnosticPredicateTy::NoMatch;
942
943 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
944 std::is_same<int8_t, T>::value;
945 if (auto ShiftedImm = getShiftedVal<8>())
946 if (!(IsByte && ShiftedImm->second) &&
947 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
948 << ShiftedImm->second))
949 return DiagnosticPredicateTy::Match;
950
951 return DiagnosticPredicateTy::NearMatch;
952 }
953
954 // Unsigned value in the range 0 to 255. For element widths of
955 // 16 bits or higher it may also be a signed multiple of 256 in the
956 // range 0 to 65280.
// Same structure as isSVECpyImm: byte elements never accept the shifted
// ('lsl #8') form; the range check is delegated to AArch64_AM.
957 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
958 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
959 return DiagnosticPredicateTy::NoMatch;
960
961 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
962 std::is_same<int8_t, T>::value;
963 if (auto ShiftedImm = getShiftedVal<8>())
964 if (!(IsByte && ShiftedImm->second) &&
965 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
966 << ShiftedImm->second))
967 return DiagnosticPredicateTy::Match;
968
969 return DiagnosticPredicateTy::NearMatch;
970 }
971
// Matches a logical immediate that is NOT also representable as an SVE
// CPY immediate (the CPY form is preferred when both encodings exist).
972 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
973 if (isLogicalImm<T>() && !isSVECpyImm<T>())
974 return DiagnosticPredicateTy::Match;
975 return DiagnosticPredicateTy::NoMatch;
976 }
977
// True if this operand is a condition code (eq, ne, ...).
978 bool isCondCode() const { return Kind == k_CondCode; }
979
// True if the operand is a constant that fits AdvSIMD modified-immediate
// type 10 (per AArch64_AM::isAdvSIMDModImmType10).
980 bool isSIMDImmType10() const {
981 if (!isImm())
982 return false;
983 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
984 if (!MCE)
985 return false;
986 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
987 }
988
// True if the operand can be an N-bit PC-relative branch target. Symbolic
// (non-constant) targets are accepted and resolved by fixups; constants must
// be 4-byte aligned and fit the signed N-bit word-offset range.
989 template<int N>
990 bool isBranchTarget() const {
991 if (!isImm())
992 return false;
993 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
994 if (!MCE)
995 return true;
996 int64_t Val = MCE->getValue();
997 if (Val & 0x3)
998 return false;
999 assert(N > 0 && "Branch target immediate cannot be 0 bits!")(static_cast <bool> (N > 0 && "Branch target immediate cannot be 0 bits!"
) ? void (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 999, __extension__ __PRETTY_FUNCTION__))
;
1000 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1001 }
1002
// True if the operand is a symbol reference whose ELF modifier is in
// AllowedModifiers. Darwin-style modifiers are rejected outright.
1003 bool
1004 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1005 if (!isImm())
1006 return false;
1007
1008 AArch64MCExpr::VariantKind ELFRefKind;
1009 MCSymbolRefExpr::VariantKind DarwinRefKind;
1010 int64_t Addend;
1011 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1012 DarwinRefKind, Addend)) {
1013 return false;
1014 }
1015 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1016 return false;
1017
1018 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
1019 if (ELFRefKind == AllowedModifiers[i])
1020 return true;
1021 }
1022
1023 return false;
1024 }
1025
// MOVW symbol with a :g3: (bits 48-63) modifier.
1026 bool isMovWSymbolG3() const {
1027 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1028 }
1029
// MOVW symbol with a :g2: (bits 32-47) modifier.
1030 bool isMovWSymbolG2() const {
1031 return isMovWSymbol(
1032 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1033 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1034 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1035 AArch64MCExpr::VK_DTPREL_G2});
1036 }
1037
// MOVW symbol with a :g1: (bits 16-31) modifier.
1038 bool isMovWSymbolG1() const {
1039 return isMovWSymbol(
1040 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1041 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1042 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1043 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1044 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1045 }
1046
// MOVW symbol with a :g0: (bits 0-15) modifier.
1047 bool isMovWSymbolG0() const {
1048 return isMovWSymbol(
1049 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1050 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1051 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1052 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1053 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1054 }
1055
// True if the operand can be encoded as a MOVZ-based alias of MOV for a
// RegWidth-bit register with the given Shift. Non-constant expressions are
// only accepted with Shift == 0.
1056 template<int RegWidth, int Shift>
1057 bool isMOVZMovAlias() const {
1058 if (!isImm()) return false;
1059
1060 const MCExpr *E = getImm();
1061 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1062 uint64_t Value = CE->getValue();
1063
1064 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1065 }
1066 // Only supports the case of Shift being 0 if an expression is used as an
1067 // operand
1068 return !Shift && E;
1069 }
1070
// True if the constant operand can be encoded as a MOVN-based alias of MOV.
// Unlike isMOVZMovAlias, non-constant expressions never match.
1071 template<int RegWidth, int Shift>
1072 bool isMOVNMovAlias() const {
1073 if (!isImm()) return false;
1074
1075 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1076 if (!CE) return false;
1077 uint64_t Value = CE->getValue();
1078
1079 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1080 }
1081
// True if the operand is an FP immediate encodable in the 8-bit FMOV
// immediate format (getFP64Imm returns -1 for unencodable values).
1082 bool isFPImm() const {
1083 return Kind == k_FPImm &&
1084 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1085 }
1086
// Barrier operand predicates: plain barriers vs. the nXS-qualified variant
// are distinguished by the stored nXS modifier flag.
1087 bool isBarrier() const {
1088 return Kind == k_Barrier && !getBarriernXSModifier();
1089 }
1090 bool isBarriernXS() const {
1091 return Kind == k_Barrier && getBarriernXSModifier();
1092 }
1093 bool isSysReg() const { return Kind == k_SysReg; }
1094
// True if the system register is readable via MRS (-1U marks "no encoding").
1095 bool isMRSSystemRegister() const {
1096 if (!isSysReg()) return false;
1097
1098 return SysReg.MRSReg != -1U;
1099 }
1100
// True if the system register is writable via MSR (-1U marks "no encoding").
1101 bool isMSRSystemRegister() const {
1102 if (!isSysReg()) return false;
1103 return SysReg.MSRReg != -1U;
1104 }
1105
// PSTATE fields whose MSR-immediate form takes only a 1-bit value
// (PAN/DIT/UAO/SSBS).
1106 bool isSystemPStateFieldWithImm0_1() const {
1107 if (!isSysReg()) return false;
1108 return (SysReg.PStateField == AArch64PState::PAN ||
1109 SysReg.PStateField == AArch64PState::DIT ||
1110 SysReg.PStateField == AArch64PState::UAO ||
1111 SysReg.PStateField == AArch64PState::SSBS);
1112 }
1113
// PSTATE fields taking a 4-bit immediate: any valid field that is not one
// of the 1-bit-immediate fields above (-1U marks "not a PSTATE field").
1114 bool isSystemPStateFieldWithImm0_15() const {
1115 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1116 return SysReg.PStateField != -1U;
1117 }
1118
// True if the operand is a valid SVCR (SME) operand with a known PStateField.
1119 bool isSVCR() const {
1120 if (Kind != k_SVCR)
1121 return false;
1122 return SVCR.PStateField != -1U;
1123 }
1124
// MCParsedAsmOperand interface: any register kind (scalar or vector).
1125 bool isReg() const override {
1126 return Kind == k_Register;
1127 }
1128
// True only for scalar (GPR/FPR) registers, not vector registers.
1129 bool isScalarReg() const {
1130 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1131 }
1132
// True for NEON (AdvSIMD) vector registers.
1133 bool isNeonVectorReg() const {
1134 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1135 }
1136
// NEON vector register restricted to the lower register banks
// (FPR128_lo / FPR64_lo register classes).
1137 bool isNeonVectorRegLo() const {
1138 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1139 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1140 Reg.RegNum) ||
1141 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1142 Reg.RegNum));
1143 }
1144
// True if the operand is an SME matrix (ZA tile) register.
1145 bool isMatrix() const { return Kind == k_MatrixRegister; }
1146
// True if the operand is an SVE register belonging to register class
// 'Class'. The class ID determines whether a data vector (ZPR*) or
// predicate vector (PPR*) register kind is expected; other class IDs are
// a programming error.
1147 template <unsigned Class> bool isSVEVectorReg() const {
1148 RegKind RK;
1149 switch (Class) {
1150 case AArch64::ZPRRegClassID:
1151 case AArch64::ZPR_3bRegClassID:
1152 case AArch64::ZPR_4bRegClassID:
1153 RK = RegKind::SVEDataVector;
1154 break;
1155 case AArch64::PPRRegClassID:
1156 case AArch64::PPR_3bRegClassID:
1157 RK = RegKind::SVEPredicateVector;
1158 break;
1159 default:
1160 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1160)
;
1161 }
1162
1163 return (Kind == k_Register && Reg.Kind == RK) &&
1164 AArch64MCRegisterClasses[Class].contains(getReg());
1165 }
1166
// Scalar FP register being used where a Z (SVE) register is expected;
// membership in 'Class' is checked against the scalar register number.
1167 template <unsigned Class> bool isFPRasZPR() const {
1168 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1169 AArch64MCRegisterClasses[Class].contains(getReg());
1170 }
1171
// SVE predicate register with a specific element width. NoMatch if not a
// predicate register at all; NearMatch if the register is right but the
// element-width suffix is wrong (enables a targeted diagnostic).
1172 template <int ElementWidth, unsigned Class>
1173 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1174 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1175 return DiagnosticPredicateTy::NoMatch;
1176
1177 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1178 return DiagnosticPredicateTy::Match;
1179
1180 return DiagnosticPredicateTy::NearMatch;
1181 }
1182
// SVE data (Z) register with a specific element width; same Match /
// NearMatch / NoMatch convention as the predicate-register variant above.
1183 template <int ElementWidth, unsigned Class>
1184 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1185 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1186 return DiagnosticPredicateTy::NoMatch;
1187
1188 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1189 return DiagnosticPredicateTy::Match;
1190
1191 return DiagnosticPredicateTy::NearMatch;
1192 }
1193
// SVE data register plus an attached shift/extend that must match the
// expected type and the scaling implied by ShiftWidth (log2 of the access
// size in bytes).
1194 template <int ElementWidth, unsigned Class,
1195 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1196 bool ShiftWidthAlwaysSame>
1197 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1198 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1199 if (!VectorMatch.isMatch())
1200 return DiagnosticPredicateTy::NoMatch;
1201
1202 // Give a more specific diagnostic when the user has explicitly typed in
1203 // a shift-amount that does not match what is expected, but for which
1204 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1205 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1206 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1207 ShiftExtendTy == AArch64_AM::SXTW) &&
1208 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1209 return DiagnosticPredicateTy::NoMatch;
1210
1211 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1212 return DiagnosticPredicateTy::Match;
1213
1214 return DiagnosticPredicateTy::NearMatch;
1215 }
1216
// A 64-bit GPR written where a 32-bit register is wanted (parser will
// substitute the W form of the same register).
1217 bool isGPR32as64() const {
1218 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1219 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1220 }
1221
// A 32-bit GPR written where a 64-bit register is wanted (inverse of
// isGPR32as64).
1222 bool isGPR64as32() const {
1223 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1224 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1225 }
1226
// Member of the GPR64x8 class (consecutive 8-register tuples).
1227 bool isGPR64x8() const {
1228 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1229 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1230 Reg.RegNum);
1231 }
1232
// Member of the 32-bit sequential register-pair class (e.g. CASP operands).
1233 bool isWSeqPair() const {
1234 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1235 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1236 Reg.RegNum);
1237 }
1238
// Member of the 64-bit sequential register-pair class (e.g. CASP operands).
1239 bool isXSeqPair() const {
1240 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1241 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1242 Reg.RegNum);
1243 }
1244
// Complex-number rotation immediate: must be <= 270 and congruent to
// Remainder modulo Angle (e.g. 0/90/180/270 or 90/270 depending on the
// instruction). Non-constant operands are NoMatch.
1245 template<int64_t Angle, int64_t Remainder>
1246 DiagnosticPredicate isComplexRotation() const {
1247 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1248
1249 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1250 if (!CE) return DiagnosticPredicateTy::NoMatch;
1251 uint64_t Value = CE->getValue();
1252
1253 if (Value % Angle == Remainder && Value <= 270)
1254 return DiagnosticPredicateTy::Match;
1255 return DiagnosticPredicateTy::NearMatch;
1256 }
1257
// Scalar register contained in the given 64-bit GPR register class.
1258 template <unsigned RegClassID> bool isGPR64() const {
1259 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1260 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1261 }
1262
// 64-bit GPR with an attached 'lsl #log2(ExtWidth/8)' shift (scaled
// register offset form). NearMatch when the register is right but the
// shift is wrong.
1263 template <unsigned RegClassID, int ExtWidth>
1264 DiagnosticPredicate isGPR64WithShiftExtend() const {
1265 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1266 return DiagnosticPredicateTy::NoMatch;
1267
1268 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1269 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1270 return DiagnosticPredicateTy::Match;
1271 return DiagnosticPredicateTy::NearMatch;
1272 }
1273
1274 /// Is this a vector list with the type implicit (presumably attached to the
1275 /// instruction itself)?
/// "Implicit" means the list was written without an element-type suffix,
/// recorded as NumElements == 0.
1276 template <RegKind VectorKind, unsigned NumRegs>
1277 bool isImplicitlyTypedVectorList() const {
1278 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1279 VectorList.NumElements == 0 &&
1280 VectorList.RegisterKind == VectorKind;
1281 }
1282
// Vector list with an explicit type: register kind, list length, element
// count and element width must all match the template parameters.
1283 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1284 unsigned ElementWidth>
1285 bool isTypedVectorList() const {
1286 if (Kind != k_VectorList)
1287 return false;
1288 if (VectorList.Count != NumRegs)
1289 return false;
1290 if (VectorList.RegisterKind != VectorKind)
1291 return false;
1292 if (VectorList.ElementWidth != ElementWidth)
1293 return false;
1294 return VectorList.NumElements == NumElements;
1295 }
1296
// Vector lane index within the inclusive [Min, Max] range; out-of-range
// indices are NearMatch so the matcher can diagnose the bad lane number.
1297 template <int Min, int Max>
1298 DiagnosticPredicate isVectorIndex() const {
1299 if (Kind != k_VectorIndex)
1300 return DiagnosticPredicateTy::NoMatch;
1301 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1302 return DiagnosticPredicateTy::Match;
1303 return DiagnosticPredicateTy::NearMatch;
1304 }
1305
// MCParsedAsmOperand interface: true for raw token operands.
1306 bool isToken() const override { return Kind == k_Token; }
1307
// True if this is a token operand whose text equals Str exactly.
1308 bool isTokenEqual(StringRef Str) const {
1309 return Kind == k_Token && getToken() == Str;
1310 }
// Simple kind predicates for the remaining operand categories.
1311 bool isSysCR() const { return Kind == k_SysCR; }
1312 bool isPrefetch() const { return Kind == k_Prefetch; }
1313 bool isPSBHint() const { return Kind == k_PSBHint; }
1314 bool isBTIHint() const { return Kind == k_BTIHint; }
1315 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// A shifter is a shift-extend operand whose type is one of the pure shift
// operations (LSL/LSR/ASR/ROR/MSL), as opposed to the extend operations.
1316 bool isShifter() const {
1317 if (!isShiftExtend())
1318 return false;
1319
1320 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1321 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1322 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1323 ST == AArch64_AM::MSL);
1324 }
1325
// True (Match) if the FP immediate is bit-for-bit equal to the exact value
// named by ImmEnum in the AArch64ExactFPImm table. The table entry's string
// representation is re-parsed into an APFloat and compared bitwise, so only
// the exact encoding matches — no rounding tolerance.
1326 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1327 if (Kind != k_FPImm)
1328 return DiagnosticPredicateTy::NoMatch;
1329
1330 if (getFPImmIsExact()) {
1331 // Lookup the immediate from table of supported immediates.
1332 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1333 assert(Desc && "Unknown enum value")(static_cast <bool> (Desc && "Unknown enum value"
) ? void (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1333, __extension__ __PRETTY_FUNCTION__))
;
1334
1335 // Calculate its FP value.
1336 APFloat RealVal(APFloat::IEEEdouble());
1337 auto StatusOrErr =
1338 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1339 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1340 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1340)
;
1341
1342 if (getFPImm().bitwiseIsEqual(RealVal))
1343 return DiagnosticPredicateTy::Match;
1344 }
1345
1346 return DiagnosticPredicateTy::NearMatch;
1347 }
1348
// Two-candidate variant: Match if the immediate equals either ImmA or ImmB;
// otherwise propagates the last single-candidate result (NearMatch/NoMatch).
1349 template <unsigned ImmA, unsigned ImmB>
1350 DiagnosticPredicate isExactFPImm() const {
1351 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1352 if ((Res = isExactFPImm<ImmA>()))
1353 return DiagnosticPredicateTy::Match;
1354 if ((Res = isExactFPImm<ImmB>()))
1355 return DiagnosticPredicateTy::Match;
1356 return Res;
1357 }
1358
// An extend is a shift-extend operand with one of the UXT*/SXT* types
// (LSL is accepted as an alias for UXTX) and an amount of at most 4.
1359 bool isExtend() const {
1360 if (!isShiftExtend())
1361 return false;
1362
1363 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1364 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1365 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1366 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1367 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1368 ET == AArch64_AM::LSL) &&
1369 getShiftExtendAmount() <= 4;
1370 }
1371
// Extend usable with a 64-bit destination: must take a 32-bit (or
// narrower) source, so the 64-bit UXTX/SXTX/LSL forms are excluded.
1372 bool isExtend64() const {
1373 if (!isExtend())
1374 return false;
1375 // Make sure the extend expects a 32-bit source register.
1376 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1377 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1378 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1379 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1380 }
1381
// 64-bit-source extend (UXTX/SXTX/LSL) with an amount of at most 4.
1382 bool isExtendLSL64() const {
1383 if (!isExtend())
1384 return false;
1385 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1386 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1387 ET == AArch64_AM::LSL) &&
1388 getShiftExtendAmount() <= 4;
1389 }
1390
// X-register memory-offset extend (LSL/SXTX) whose amount is either 0 or
// the scaling implied by a Width-bit access (log2 of the byte size).
1391 template<int Width> bool isMemXExtend() const {
1392 if (!isExtend())
1393 return false;
1394 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1395 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1396 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1397 getShiftExtendAmount() == 0);
1398 }
1399
// W-register memory-offset extend (UXTW/SXTW) whose amount is either 0 or
// the scaling implied by a Width-bit access.
1400 template<int Width> bool isMemWExtend() const {
1401 if (!isExtend())
1402 return false;
1403 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1404 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1405 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1406 getShiftExtendAmount() == 0);
1407 }
1408
// Shifter usable with arithmetic instructions: LSL/LSR/ASR with an amount
// strictly less than the register width.
1409 template <unsigned width>
1410 bool isArithmeticShifter() const {
1411 if (!isShifter())
1412 return false;
1413
1414 // An arithmetic shifter is LSL, LSR, or ASR.
1415 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1416 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1417 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1418 }
1419
// Shifter usable with logical instructions: LSL/LSR/ASR/ROR with an amount
// strictly less than the register width.
1420 template <unsigned width>
1421 bool isLogicalShifter() const {
1422 if (!isShifter())
1423 return false;
1424
1425 // A logical shifter is LSL, LSR, ASR or ROR.
1426 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1427 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1428 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1429 getShiftExtendAmount() < width;
1430 }
1431
// Shifter for a 32-bit MOVZ/MOVN/MOVK immediate: LSL of 0 or 16.
// (The original comment here listed the 64-bit amounts 0/16/32/48; the code
// below correctly accepts only 0 and 16 for the 32-bit form.)
1432 bool isMovImm32Shifter() const {
1433 if (!isShifter())
1434 return false;
1435
1436 // A 32-bit MOVi shifter is LSL of 0 or 16.
1437 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1438 if (ST != AArch64_AM::LSL)
1439 return false;
1440 uint64_t Val = getShiftExtendAmount();
1441 return (Val == 0 || Val == 16);
1442 }
1443
// Shifter for a 64-bit MOVZ/MOVN/MOVK immediate: LSL of 0, 16, 32, or 48.
// (The original comment here said 0/16 — that applies to the 32-bit form;
// the code below matches the 64-bit amounts.)
1444 bool isMovImm64Shifter() const {
1445 if (!isShifter())
1446 return false;
1447
1448 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1449 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1450 if (ST != AArch64_AM::LSL)
1451 return false;
1452 uint64_t Val = getShiftExtendAmount();
1453 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1454 }
1455
// Vector logical-immediate shifter: LSL by 0, 8, 16, or 24.
1456 bool isLogicalVecShifter() const {
1457 if (!isShifter())
1458 return false;
1459
1460 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1461 unsigned Shift = getShiftExtendAmount();
1462 return getShiftExtendType() == AArch64_AM::LSL &&
1463 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1464 }
1465
// Half-word variant of the vector logical shifter: LSL by 0 or 8 only.
1466 bool isLogicalVecHalfWordShifter() const {
1467 if (!isLogicalVecShifter())
1468 return false;
1469
1470 // A logical vector halfword shifter is a left shift by 0 or 8.
1471 unsigned Shift = getShiftExtendAmount();
1472 return getShiftExtendType() == AArch64_AM::LSL &&
1473 (Shift == 0 || Shift == 8);
1474 }
1475
// MSL ("masked shift left") shifter used by vector MOVI: shift by 8 or 16.
// (The original comment called this a "logical vector shifter"; the code
// checks for MSL, not LSL.)
1476 bool isMoveVecShifter() const {
1477 if (!isShiftExtend())
1478 return false;
1479
1480 // An MSL vector shifter is a left shift by 8 or 16.
1481 unsigned Shift = getShiftExtendAmount();
1482 return getShiftExtendType() == AArch64_AM::MSL &&
1483 (Shift == 8 || Shift == 16);
1484 }
1485
1486 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1487 // to LDUR/STUR when the offset is not legal for the former but is for
1488 // the latter. As such, in addition to checking for being a legal unscaled
1489 // address, also check that it is not a legal scaled address. This avoids
1490 // ambiguity in the matcher.
1491 template<int Width>
1492 bool isSImm9OffsetFB() const {
1493 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1494 }
1495
1496 bool isAdrpLabel() const {
1497 // Validation was handled during parsing, so we just sanity check that
1498 // something didn't go haywire.
1499 if (!isImm())
1500 return false;
1501
1502 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1503 int64_t Val = CE->getValue();
1504 int64_t Min = - (4096 * (1LL << (21 - 1)));
1505 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1506 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1507 }
1508
1509 return true;
1510 }
1511
1512 bool isAdrLabel() const {
1513 // Validation was handled during parsing, so we just sanity check that
1514 // something didn't go haywire.
1515 if (!isImm())
1516 return false;
1517
1518 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1519 int64_t Val = CE->getValue();
1520 int64_t Min = - (1LL << (21 - 1));
1521 int64_t Max = ((1LL << (21 - 1)) - 1);
1522 return Val >= Min && Val <= Max;
1523 }
1524
1525 return true;
1526 }
1527
1528 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1529 DiagnosticPredicate isMatrixRegOperand() const {
1530 if (!isMatrix())
1531 return DiagnosticPredicateTy::NoMatch;
1532 if (getMatrixKind() != Kind ||
1533 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1534 EltSize != getMatrixElementWidth())
1535 return DiagnosticPredicateTy::NearMatch;
1536 return DiagnosticPredicateTy::Match;
1537 }
1538
1539 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1540 // Add as immediates when possible. Null MCExpr = 0.
1541 if (!Expr)
1542 Inst.addOperand(MCOperand::createImm(0));
1543 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1544 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1545 else
1546 Inst.addOperand(MCOperand::createExpr(Expr));
1547 }
1548
1549 void addRegOperands(MCInst &Inst, unsigned N) const {
1550 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1550, __extension__ __PRETTY_FUNCTION__))
;
1551 Inst.addOperand(MCOperand::createReg(getReg()));
1552 }
1553
1554 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1555 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1555, __extension__ __PRETTY_FUNCTION__))
;
1556 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1557 }
1558
1559 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1560 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1560, __extension__ __PRETTY_FUNCTION__))
;
1561 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1562, __extension__ __PRETTY_FUNCTION__))
1562 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1562, __extension__ __PRETTY_FUNCTION__))
;
1563
1564 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1565 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1566 RI->getEncodingValue(getReg()));
1567
1568 Inst.addOperand(MCOperand::createReg(Reg));
1569 }
1570
1571 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1572 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1572, __extension__ __PRETTY_FUNCTION__))
;
1573 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1574, __extension__ __PRETTY_FUNCTION__))
1574 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1574, __extension__ __PRETTY_FUNCTION__))
;
1575
1576 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1577 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1578 RI->getEncodingValue(getReg()));
1579
1580 Inst.addOperand(MCOperand::createReg(Reg));
1581 }
1582
1583 template <int Width>
1584 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1585 unsigned Base;
1586 switch (Width) {
1587 case 8: Base = AArch64::B0; break;
1588 case 16: Base = AArch64::H0; break;
1589 case 32: Base = AArch64::S0; break;
1590 case 64: Base = AArch64::D0; break;
1591 case 128: Base = AArch64::Q0; break;
1592 default:
1593 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1593)
;
1594 }
1595 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1596 }
1597
1598 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1599 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1599, __extension__ __PRETTY_FUNCTION__))
;
1600 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1601, __extension__ __PRETTY_FUNCTION__))
1601 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1601, __extension__ __PRETTY_FUNCTION__))
;
1602 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1603 }
1604
1605 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1606 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1606, __extension__ __PRETTY_FUNCTION__))
;
1607 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1608, __extension__ __PRETTY_FUNCTION__))
1608 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1608, __extension__ __PRETTY_FUNCTION__))
;
1609 Inst.addOperand(MCOperand::createReg(getReg()));
1610 }
1611
1612 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1613 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1613, __extension__ __PRETTY_FUNCTION__))
;
1614 Inst.addOperand(MCOperand::createReg(getReg()));
1615 }
1616
// Selects which register bank a parsed vector list is counted in; used as a
// row index into the FirstRegs table in addVectorListOperands.
enum VecListIndexType {
  VecListIdx_DReg = 0, // 64-bit NEON D registers
  VecListIdx_QReg = 1, // 128-bit NEON Q registers
  VecListIdx_ZReg = 2, // SVE scalable Z registers
};
1622
1623 template <VecListIndexType RegTy, unsigned NumRegs>
1624 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1625 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1625, __extension__ __PRETTY_FUNCTION__))
;
1626 static const unsigned FirstRegs[][5] = {
1627 /* DReg */ { AArch64::Q0,
1628 AArch64::D0, AArch64::D0_D1,
1629 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1630 /* QReg */ { AArch64::Q0,
1631 AArch64::Q0, AArch64::Q0_Q1,
1632 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1633 /* ZReg */ { AArch64::Z0,
1634 AArch64::Z0, AArch64::Z0_Z1,
1635 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1636 };
1637
1638 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs
<= 4) && " NumRegs must be <= 4 for ZRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1639, __extension__ __PRETTY_FUNCTION__))
1639 " NumRegs must be <= 4 for ZRegs")(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs
<= 4) && " NumRegs must be <= 4 for ZRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1639, __extension__ __PRETTY_FUNCTION__))
;
1640
1641 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1642 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1643 FirstRegs[(unsigned)RegTy][0]));
1644 }
1645
1646 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1647 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1647, __extension__ __PRETTY_FUNCTION__))
;
1648 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1649 }
1650
1651 template <unsigned ImmIs0, unsigned ImmIs1>
1652 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1653 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1653, __extension__ __PRETTY_FUNCTION__))
;
1654 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")(static_cast <bool> (bool(isExactFPImm<ImmIs0, ImmIs1
>()) && "Invalid operand") ? void (0) : __assert_fail
("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1654, __extension__ __PRETTY_FUNCTION__))
;
1655 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1656 }
1657
1658 void addImmOperands(MCInst &Inst, unsigned N) const {
1659 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1659, __extension__ __PRETTY_FUNCTION__))
;
1660 // If this is a pageoff symrefexpr with an addend, adjust the addend
1661 // to be only the page-offset portion. Otherwise, just add the expr
1662 // as-is.
1663 addExpr(Inst, getImm());
1664 }
1665
1666 template <int Shift>
1667 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1668 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1668, __extension__ __PRETTY_FUNCTION__))
;
1669 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1670 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1671 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1672 } else if (isShiftedImm()) {
1673 addExpr(Inst, getShiftedImmVal());
1674 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1675 } else {
1676 addExpr(Inst, getImm());
1677 Inst.addOperand(MCOperand::createImm(0));
1678 }
1679 }
1680
1681 template <int Shift>
1682 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1683 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1683, __extension__ __PRETTY_FUNCTION__))
;
1684 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1685 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1686 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1687 } else
1688 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1688)
;
1689 }
1690
1691 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1692 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1692, __extension__ __PRETTY_FUNCTION__))
;
1693 Inst.addOperand(MCOperand::createImm(getCondCode()));
1694 }
1695
1696 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1697 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1697, __extension__ __PRETTY_FUNCTION__))
;
1698 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1699 if (!MCE)
1700 addExpr(Inst, getImm());
1701 else
1702 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1703 }
1704
// ADR labels need no scaling: the byte offset is encoded directly, so this
// simply delegates to the generic immediate path.
void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
  addImmOperands(Inst, N);
}
1708
1709 template<int Scale>
1710 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1711 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1711, __extension__ __PRETTY_FUNCTION__))
;
1712 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1713
1714 if (!MCE) {
1715 Inst.addOperand(MCOperand::createExpr(getImm()));
1716 return;
1717 }
1718 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1719 }
1720
1721 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1722 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1722, __extension__ __PRETTY_FUNCTION__))
;
1723 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1724 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1725 }
1726
1727 template <int Scale>
1728 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1729 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1729, __extension__ __PRETTY_FUNCTION__))
;
1730 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1731 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1732 }
1733
1734 template <typename T>
1735 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1736 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1736, __extension__ __PRETTY_FUNCTION__))
;
1737 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1738 std::make_unsigned_t<T> Val = MCE->getValue();
1739 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1740 Inst.addOperand(MCOperand::createImm(encoding));
1741 }
1742
1743 template <typename T>
1744 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1745 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1745, __extension__ __PRETTY_FUNCTION__))
;
1746 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1747 std::make_unsigned_t<T> Val = ~MCE->getValue();
1748 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1749 Inst.addOperand(MCOperand::createImm(encoding));
1750 }
1751
1752 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1753 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1753, __extension__ __PRETTY_FUNCTION__))
;
1754 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1755 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1756 Inst.addOperand(MCOperand::createImm(encoding));
1757 }
1758
1759 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1760 // Branch operands don't encode the low bits, so shift them off
1761 // here. If it's a label, however, just put it on directly as there's
1762 // not enough information now to do anything.
1763 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1763, __extension__ __PRETTY_FUNCTION__))
;
1764 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1765 if (!MCE) {
1766 addExpr(Inst, getImm());
1767 return;
1768 }
1769 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1769, __extension__ __PRETTY_FUNCTION__))
;
1770 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1771 }
1772
1773 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1774 // Branch operands don't encode the low bits, so shift them off
1775 // here. If it's a label, however, just put it on directly as there's
1776 // not enough information now to do anything.
1777 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1777, __extension__ __PRETTY_FUNCTION__))
;
1778 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1779 if (!MCE) {
1780 addExpr(Inst, getImm());
1781 return;
1782 }
1783 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1783, __extension__ __PRETTY_FUNCTION__))
;
1784 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1785 }
1786
1787 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1788 // Branch operands don't encode the low bits, so shift them off
1789 // here. If it's a label, however, just put it on directly as there's
1790 // not enough information now to do anything.
1791 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1791, __extension__ __PRETTY_FUNCTION__))
;
1792 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1793 if (!MCE) {
1794 addExpr(Inst, getImm());
1795 return;
1796 }
1797 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1797, __extension__ __PRETTY_FUNCTION__))
;
1798 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1799 }
1800
1801 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1802 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1802, __extension__ __PRETTY_FUNCTION__))
;
1803 Inst.addOperand(MCOperand::createImm(
1804 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1805 }
1806
1807 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1808 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1808, __extension__ __PRETTY_FUNCTION__))
;
1809 Inst.addOperand(MCOperand::createImm(getBarrier()));
1810 }
1811
1812 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1813 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1813, __extension__ __PRETTY_FUNCTION__))
;
1814 Inst.addOperand(MCOperand::createImm(getBarrier()));
1815 }
1816
1817 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1818 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1818, __extension__ __PRETTY_FUNCTION__))
;
1819
1820 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1821 }
1822
1823 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1824 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1824, __extension__ __PRETTY_FUNCTION__))
;
1825
1826 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1827 }
1828
1829 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1830 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1830, __extension__ __PRETTY_FUNCTION__))
;
1831
1832 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1833 }
1834
1835 void addSVCROperands(MCInst &Inst, unsigned N) const {
1836 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1836, __extension__ __PRETTY_FUNCTION__))
;
1837
1838 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1839 }
1840
1841 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1842 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1842, __extension__ __PRETTY_FUNCTION__))
;
1843
1844 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1845 }
1846
1847 void addSysCROperands(MCInst &Inst, unsigned N) const {
1848 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1848, __extension__ __PRETTY_FUNCTION__))
;
1849 Inst.addOperand(MCOperand::createImm(getSysCR()));
1850 }
1851
1852 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1853 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1853, __extension__ __PRETTY_FUNCTION__))
;
1854 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1855 }
1856
1857 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1858 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1858, __extension__ __PRETTY_FUNCTION__))
;
1859 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1860 }
1861
1862 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1863 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1863, __extension__ __PRETTY_FUNCTION__))
;
1864 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1865 }
1866
1867 void addShifterOperands(MCInst &Inst, unsigned N) const {
1868 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1868, __extension__ __PRETTY_FUNCTION__))
;
1869 unsigned Imm =
1870 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1871 Inst.addOperand(MCOperand::createImm(Imm));
1872 }
1873
1874 void addExtendOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1875, __extension__ __PRETTY_FUNCTION__))
;
1876 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1877 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1878 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1879 Inst.addOperand(MCOperand::createImm(Imm));
1880 }
1881
1882 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1883 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1883, __extension__ __PRETTY_FUNCTION__))
;
1884 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1885 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1886 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1887 Inst.addOperand(MCOperand::createImm(Imm));
1888 }
1889
1890 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1891 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1891, __extension__ __PRETTY_FUNCTION__))
;
1892 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1893 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1894 Inst.addOperand(MCOperand::createImm(IsSigned));
1895 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1896 }
1897
1898 // For 8-bit load/store instructions with a register offset, both the
1899 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1900 // they're disambiguated by whether the shift was explicit or implicit rather
1901 // than its size.
1902 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1903 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1903, __extension__ __PRETTY_FUNCTION__))
;
1904 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1905 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1906 Inst.addOperand(MCOperand::createImm(IsSigned));
1907 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1908 }
1909
1910 template<int Shift>
1911 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1912 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1912, __extension__ __PRETTY_FUNCTION__))
;
1913
1914 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1915 if (CE) {
1916 uint64_t Value = CE->getValue();
1917 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1918 } else {
1919 addExpr(Inst, getImm());
1920 }
1921 }
1922
1923 template<int Shift>
1924 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1925 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1925, __extension__ __PRETTY_FUNCTION__))
;
1926
1927 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1928 uint64_t Value = CE->getValue();
1929 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1930 }
1931
1932 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1933 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1933, __extension__ __PRETTY_FUNCTION__))
;
1934 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1935 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1936 }
1937
1938 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1939 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1939, __extension__ __PRETTY_FUNCTION__))
;
1940 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1941 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1942 }
1943
1944 void print(raw_ostream &OS) const override;
1945
1946 static std::unique_ptr<AArch64Operand>
1947 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1948 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1949 Op->Tok.Data = Str.data();
1950 Op->Tok.Length = Str.size();
1951 Op->Tok.IsSuffix = IsSuffix;
1952 Op->StartLoc = S;
1953 Op->EndLoc = S;
1954 return Op;
1955 }
1956
1957 static std::unique_ptr<AArch64Operand>
1958 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1959 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1960 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1961 unsigned ShiftAmount = 0,
1962 unsigned HasExplicitAmount = false) {
1963 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1964 Op->Reg.RegNum = RegNum;
1965 Op->Reg.Kind = Kind;
1966 Op->Reg.ElementWidth = 0;
1967 Op->Reg.EqualityTy = EqTy;
1968 Op->Reg.ShiftExtend.Type = ExtTy;
1969 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1970 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1971 Op->StartLoc = S;
1972 Op->EndLoc = E;
1973 return Op;
1974 }
1975
1976 static std::unique_ptr<AArch64Operand>
1977 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1978 SMLoc S, SMLoc E, MCContext &Ctx,
1979 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1980 unsigned ShiftAmount = 0,
1981 unsigned HasExplicitAmount = false) {
1982 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1984, __extension__ __PRETTY_FUNCTION__))
1983 Kind == RegKind::SVEPredicateVector) &&(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1984, __extension__ __PRETTY_FUNCTION__))
1984 "Invalid vector kind")(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1984, __extension__ __PRETTY_FUNCTION__))
;
1985 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1986 HasExplicitAmount);
1987 Op->Reg.ElementWidth = ElementWidth;
1988 return Op;
1989 }
1990
1991 static std::unique_ptr<AArch64Operand>
1992 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1993 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1994 MCContext &Ctx) {
1995 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1996 Op->VectorList.RegNum = RegNum;
1997 Op->VectorList.Count = Count;
1998 Op->VectorList.NumElements = NumElements;
1999 Op->VectorList.ElementWidth = ElementWidth;
2000 Op->VectorList.RegisterKind = RegisterKind;
2001 Op->StartLoc = S;
2002 Op->EndLoc = E;
2003 return Op;
2004 }
2005
2006 static std::unique_ptr<AArch64Operand>
2007 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2008 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2009 Op->VectorIndex.Val = Idx;
2010 Op->StartLoc = S;
2011 Op->EndLoc = E;
2012 return Op;
2013 }
2014
2015 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2016 SMLoc E, MCContext &Ctx) {
2017 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2018 Op->Imm.Val = Val;
2019 Op->StartLoc = S;
2020 Op->EndLoc = E;
2021 return Op;
2022 }
2023
2024 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2025 unsigned ShiftAmount,
2026 SMLoc S, SMLoc E,
2027 MCContext &Ctx) {
2028 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2029 Op->ShiftedImm .Val = Val;
2030 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2031 Op->StartLoc = S;
2032 Op->EndLoc = E;
2033 return Op;
2034 }
2035
2036 static std::unique_ptr<AArch64Operand>
2037 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2038 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2039 Op->CondCode.Code = Code;
2040 Op->StartLoc = S;
2041 Op->EndLoc = E;
2042 return Op;
2043 }
2044
2045 static std::unique_ptr<AArch64Operand>
2046 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2047 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2048 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2049 Op->FPImm.IsExact = IsExact;
2050 Op->StartLoc = S;
2051 Op->EndLoc = S;
2052 return Op;
2053 }
2054
2055 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2056 StringRef Str,
2057 SMLoc S,
2058 MCContext &Ctx,
2059 bool HasnXSModifier) {
2060 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2061 Op->Barrier.Val = Val;
2062 Op->Barrier.Data = Str.data();
2063 Op->Barrier.Length = Str.size();
2064 Op->Barrier.HasnXSModifier = HasnXSModifier;
2065 Op->StartLoc = S;
2066 Op->EndLoc = S;
2067 return Op;
2068 }
2069
2070 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2071 uint32_t MRSReg,
2072 uint32_t MSRReg,
2073 uint32_t PStateField,
2074 MCContext &Ctx) {
2075 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2076 Op->SysReg.Data = Str.data();
2077 Op->SysReg.Length = Str.size();
2078 Op->SysReg.MRSReg = MRSReg;
2079 Op->SysReg.MSRReg = MSRReg;
2080 Op->SysReg.PStateField = PStateField;
2081 Op->StartLoc = S;
2082 Op->EndLoc = S;
2083 return Op;
2084 }
2085
2086 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2087 SMLoc E, MCContext &Ctx) {
2088 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2089 Op->SysCRImm.Val = Val;
2090 Op->StartLoc = S;
2091 Op->EndLoc = E;
2092 return Op;
2093 }
2094
2095 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2096 StringRef Str,
2097 SMLoc S,
2098 MCContext &Ctx) {
2099 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2100 Op->Prefetch.Val = Val;
2101 Op->Barrier.Data = Str.data();
2102 Op->Barrier.Length = Str.size();
2103 Op->StartLoc = S;
2104 Op->EndLoc = S;
2105 return Op;
2106 }
2107
2108 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2109 StringRef Str,
2110 SMLoc S,
2111 MCContext &Ctx) {
2112 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2113 Op->PSBHint.Val = Val;
2114 Op->PSBHint.Data = Str.data();
2115 Op->PSBHint.Length = Str.size();
2116 Op->StartLoc = S;
2117 Op->EndLoc = S;
2118 return Op;
2119 }
2120
2121 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2122 StringRef Str,
2123 SMLoc S,
2124 MCContext &Ctx) {
2125 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2126 Op->BTIHint.Val = Val | 32;
2127 Op->BTIHint.Data = Str.data();
2128 Op->BTIHint.Length = Str.size();
2129 Op->StartLoc = S;
2130 Op->EndLoc = S;
2131 return Op;
2132 }
2133
2134 static std::unique_ptr<AArch64Operand>
2135 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2136 SMLoc S, SMLoc E, MCContext &Ctx) {
2137 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2138 Op->MatrixReg.RegNum = RegNum;
2139 Op->MatrixReg.ElementWidth = ElementWidth;
2140 Op->MatrixReg.Kind = Kind;
2141 Op->StartLoc = S;
2142 Op->EndLoc = E;
2143 return Op;
2144 }
2145
2146 static std::unique_ptr<AArch64Operand>
2147 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2148 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2149 Op->SVCR.PStateField = PStateField;
2150 Op->SVCR.Data = Str.data();
2151 Op->SVCR.Length = Str.size();
2152 Op->StartLoc = S;
2153 Op->EndLoc = S;
2154 return Op;
2155 }
2156
2157 static std::unique_ptr<AArch64Operand>
2158 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2159 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2160 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2161 Op->ShiftExtend.Type = ShOp;
2162 Op->ShiftExtend.Amount = Val;
2163 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2164 Op->StartLoc = S;
2165 Op->EndLoc = E;
2166 return Op;
2167 }
2168};
2169
2170} // end anonymous namespace.
2171
2172void AArch64Operand::print(raw_ostream &OS) const {
2173 switch (Kind) {
2174 case k_FPImm:
2175 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2176 if (!getFPImmIsExact())
2177 OS << " (inexact)";
2178 OS << ">";
2179 break;
2180 case k_Barrier: {
2181 StringRef Name = getBarrierName();
2182 if (!Name.empty())
2183 OS << "<barrier " << Name << ">";
2184 else
2185 OS << "<barrier invalid #" << getBarrier() << ">";
2186 break;
2187 }
2188 case k_Immediate:
2189 OS << *getImm();
2190 break;
2191 case k_ShiftedImm: {
2192 unsigned Shift = getShiftedImmShift();
2193 OS << "<shiftedimm ";
2194 OS << *getShiftedImmVal();
2195 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2196 break;
2197 }
2198 case k_CondCode:
2199 OS << "<condcode " << getCondCode() << ">";
2200 break;
2201 case k_VectorList: {
2202 OS << "<vectorlist ";
2203 unsigned Reg = getVectorListStart();
2204 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2205 OS << Reg + i << " ";
2206 OS << ">";
2207 break;
2208 }
2209 case k_VectorIndex:
2210 OS << "<vectorindex " << getVectorIndex() << ">";
2211 break;
2212 case k_SysReg:
2213 OS << "<sysreg: " << getSysReg() << '>';
2214 break;
2215 case k_Token:
2216 OS << "'" << getToken() << "'";
2217 break;
2218 case k_SysCR:
2219 OS << "c" << getSysCR();
2220 break;
2221 case k_Prefetch: {
2222 StringRef Name = getPrefetchName();
2223 if (!Name.empty())
2224 OS << "<prfop " << Name << ">";
2225 else
2226 OS << "<prfop invalid #" << getPrefetch() << ">";
2227 break;
2228 }
2229 case k_PSBHint:
2230 OS << getPSBHintName();
2231 break;
2232 case k_BTIHint:
2233 OS << getBTIHintName();
2234 break;
2235 case k_MatrixRegister:
2236 OS << "<matrix " << getMatrixReg() << ">";
2237 break;
2238 case k_SVCR: {
2239 OS << getSVCR();
2240 break;
2241 }
2242 case k_Register:
2243 OS << "<register " << getReg() << ">";
2244 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2245 break;
2246 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2247 case k_ShiftExtend:
2248 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2249 << getShiftExtendAmount();
2250 if (!hasShiftExtendAmount())
2251 OS << "<imp>";
2252 OS << '>';
2253 break;
2254 }
2255}
2256
2257/// @name Auto-generated Match Functions
2258/// {
2259
2260static unsigned MatchRegisterName(StringRef Name);
2261
2262/// }
2263
2264static unsigned MatchNeonVectorRegName(StringRef Name) {
2265 return StringSwitch<unsigned>(Name.lower())
2266 .Case("v0", AArch64::Q0)
2267 .Case("v1", AArch64::Q1)
2268 .Case("v2", AArch64::Q2)
2269 .Case("v3", AArch64::Q3)
2270 .Case("v4", AArch64::Q4)
2271 .Case("v5", AArch64::Q5)
2272 .Case("v6", AArch64::Q6)
2273 .Case("v7", AArch64::Q7)
2274 .Case("v8", AArch64::Q8)
2275 .Case("v9", AArch64::Q9)
2276 .Case("v10", AArch64::Q10)
2277 .Case("v11", AArch64::Q11)
2278 .Case("v12", AArch64::Q12)
2279 .Case("v13", AArch64::Q13)
2280 .Case("v14", AArch64::Q14)
2281 .Case("v15", AArch64::Q15)
2282 .Case("v16", AArch64::Q16)
2283 .Case("v17", AArch64::Q17)
2284 .Case("v18", AArch64::Q18)
2285 .Case("v19", AArch64::Q19)
2286 .Case("v20", AArch64::Q20)
2287 .Case("v21", AArch64::Q21)
2288 .Case("v22", AArch64::Q22)
2289 .Case("v23", AArch64::Q23)
2290 .Case("v24", AArch64::Q24)
2291 .Case("v25", AArch64::Q25)
2292 .Case("v26", AArch64::Q26)
2293 .Case("v27", AArch64::Q27)
2294 .Case("v28", AArch64::Q28)
2295 .Case("v29", AArch64::Q29)
2296 .Case("v30", AArch64::Q30)
2297 .Case("v31", AArch64::Q31)
2298 .Default(0);
2299}
2300
2301/// Returns an optional pair of (#elements, element-width) if Suffix
2302/// is a valid vector kind. Where the number of elements in a vector
2303/// or the vector width is implicit or explicitly unknown (but still a
2304/// valid suffix kind), 0 is used.
2305static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2306 RegKind VectorKind) {
2307 std::pair<int, int> Res = {-1, -1};
2308
2309 switch (VectorKind) {
2310 case RegKind::NeonVector:
2311 Res =
2312 StringSwitch<std::pair<int, int>>(Suffix.lower())
2313 .Case("", {0, 0})
2314 .Case(".1d", {1, 64})
2315 .Case(".1q", {1, 128})
2316 // '.2h' needed for fp16 scalar pairwise reductions
2317 .Case(".2h", {2, 16})
2318 .Case(".2s", {2, 32})
2319 .Case(".2d", {2, 64})
2320 // '.4b' is another special case for the ARMv8.2a dot product
2321 // operand
2322 .Case(".4b", {4, 8})
2323 .Case(".4h", {4, 16})
2324 .Case(".4s", {4, 32})
2325 .Case(".8b", {8, 8})
2326 .Case(".8h", {8, 16})
2327 .Case(".16b", {16, 8})
2328 // Accept the width neutral ones, too, for verbose syntax. If those
2329 // aren't used in the right places, the token operand won't match so
2330 // all will work out.
2331 .Case(".b", {0, 8})
2332 .Case(".h", {0, 16})
2333 .Case(".s", {0, 32})
2334 .Case(".d", {0, 64})
2335 .Default({-1, -1});
2336 break;
2337 case RegKind::SVEPredicateVector:
2338 case RegKind::SVEDataVector:
2339 case RegKind::Matrix:
2340 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2341 .Case("", {0, 0})
2342 .Case(".b", {0, 8})
2343 .Case(".h", {0, 16})
2344 .Case(".s", {0, 32})
2345 .Case(".d", {0, 64})
2346 .Case(".q", {0, 128})
2347 .Default({-1, -1});
2348 break;
2349 default:
2350 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2350)
;
2351 }
2352
2353 if (Res == std::make_pair(-1, -1))
2354 return Optional<std::pair<int, int>>();
2355
2356 return Optional<std::pair<int, int>>(Res);
2357}
2358
2359static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2360 return parseVectorKind(Suffix, VectorKind).hasValue();
2361}
2362
2363static unsigned matchSVEDataVectorRegName(StringRef Name) {
2364 return StringSwitch<unsigned>(Name.lower())
2365 .Case("z0", AArch64::Z0)
2366 .Case("z1", AArch64::Z1)
2367 .Case("z2", AArch64::Z2)
2368 .Case("z3", AArch64::Z3)
2369 .Case("z4", AArch64::Z4)
2370 .Case("z5", AArch64::Z5)
2371 .Case("z6", AArch64::Z6)
2372 .Case("z7", AArch64::Z7)
2373 .Case("z8", AArch64::Z8)
2374 .Case("z9", AArch64::Z9)
2375 .Case("z10", AArch64::Z10)
2376 .Case("z11", AArch64::Z11)
2377 .Case("z12", AArch64::Z12)
2378 .Case("z13", AArch64::Z13)
2379 .Case("z14", AArch64::Z14)
2380 .Case("z15", AArch64::Z15)
2381 .Case("z16", AArch64::Z16)
2382 .Case("z17", AArch64::Z17)
2383 .Case("z18", AArch64::Z18)
2384 .Case("z19", AArch64::Z19)
2385 .Case("z20", AArch64::Z20)
2386 .Case("z21", AArch64::Z21)
2387 .Case("z22", AArch64::Z22)
2388 .Case("z23", AArch64::Z23)
2389 .Case("z24", AArch64::Z24)
2390 .Case("z25", AArch64::Z25)
2391 .Case("z26", AArch64::Z26)
2392 .Case("z27", AArch64::Z27)
2393 .Case("z28", AArch64::Z28)
2394 .Case("z29", AArch64::Z29)
2395 .Case("z30", AArch64::Z30)
2396 .Case("z31", AArch64::Z31)
2397 .Default(0);
2398}
2399
2400static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2401 return StringSwitch<unsigned>(Name.lower())
2402 .Case("p0", AArch64::P0)
2403 .Case("p1", AArch64::P1)
2404 .Case("p2", AArch64::P2)
2405 .Case("p3", AArch64::P3)
2406 .Case("p4", AArch64::P4)
2407 .Case("p5", AArch64::P5)
2408 .Case("p6", AArch64::P6)
2409 .Case("p7", AArch64::P7)
2410 .Case("p8", AArch64::P8)
2411 .Case("p9", AArch64::P9)
2412 .Case("p10", AArch64::P10)
2413 .Case("p11", AArch64::P11)
2414 .Case("p12", AArch64::P12)
2415 .Case("p13", AArch64::P13)
2416 .Case("p14", AArch64::P14)
2417 .Case("p15", AArch64::P15)
2418 .Default(0);
2419}
2420
2421static unsigned matchMatrixRegName(StringRef Name) {
2422 return StringSwitch<unsigned>(Name.lower())
2423 .Case("za", AArch64::ZA)
2424 .Case("za0.q", AArch64::ZAQ0)
2425 .Case("za1.q", AArch64::ZAQ1)
2426 .Case("za2.q", AArch64::ZAQ2)
2427 .Case("za3.q", AArch64::ZAQ3)
2428 .Case("za4.q", AArch64::ZAQ4)
2429 .Case("za5.q", AArch64::ZAQ5)
2430 .Case("za6.q", AArch64::ZAQ6)
2431 .Case("za7.q", AArch64::ZAQ7)
2432 .Case("za8.q", AArch64::ZAQ8)
2433 .Case("za9.q", AArch64::ZAQ9)
2434 .Case("za10.q", AArch64::ZAQ10)
2435 .Case("za11.q", AArch64::ZAQ11)
2436 .Case("za12.q", AArch64::ZAQ12)
2437 .Case("za13.q", AArch64::ZAQ13)
2438 .Case("za14.q", AArch64::ZAQ14)
2439 .Case("za15.q", AArch64::ZAQ15)
2440 .Case("za0.d", AArch64::ZAD0)
2441 .Case("za1.d", AArch64::ZAD1)
2442 .Case("za2.d", AArch64::ZAD2)
2443 .Case("za3.d", AArch64::ZAD3)
2444 .Case("za4.d", AArch64::ZAD4)
2445 .Case("za5.d", AArch64::ZAD5)
2446 .Case("za6.d", AArch64::ZAD6)
2447 .Case("za7.d", AArch64::ZAD7)
2448 .Case("za0.s", AArch64::ZAS0)
2449 .Case("za1.s", AArch64::ZAS1)
2450 .Case("za2.s", AArch64::ZAS2)
2451 .Case("za3.s", AArch64::ZAS3)
2452 .Case("za0.h", AArch64::ZAH0)
2453 .Case("za1.h", AArch64::ZAH1)
2454 .Case("za0.b", AArch64::ZAB0)
2455 .Case("za0h.q", AArch64::ZAQ0)
2456 .Case("za1h.q", AArch64::ZAQ1)
2457 .Case("za2h.q", AArch64::ZAQ2)
2458 .Case("za3h.q", AArch64::ZAQ3)
2459 .Case("za4h.q", AArch64::ZAQ4)
2460 .Case("za5h.q", AArch64::ZAQ5)
2461 .Case("za6h.q", AArch64::ZAQ6)
2462 .Case("za7h.q", AArch64::ZAQ7)
2463 .Case("za8h.q", AArch64::ZAQ8)
2464 .Case("za9h.q", AArch64::ZAQ9)
2465 .Case("za10h.q", AArch64::ZAQ10)
2466 .Case("za11h.q", AArch64::ZAQ11)
2467 .Case("za12h.q", AArch64::ZAQ12)
2468 .Case("za13h.q", AArch64::ZAQ13)
2469 .Case("za14h.q", AArch64::ZAQ14)
2470 .Case("za15h.q", AArch64::ZAQ15)
2471 .Case("za0h.d", AArch64::ZAD0)
2472 .Case("za1h.d", AArch64::ZAD1)
2473 .Case("za2h.d", AArch64::ZAD2)
2474 .Case("za3h.d", AArch64::ZAD3)
2475 .Case("za4h.d", AArch64::ZAD4)
2476 .Case("za5h.d", AArch64::ZAD5)
2477 .Case("za6h.d", AArch64::ZAD6)
2478 .Case("za7h.d", AArch64::ZAD7)
2479 .Case("za0h.s", AArch64::ZAS0)
2480 .Case("za1h.s", AArch64::ZAS1)
2481 .Case("za2h.s", AArch64::ZAS2)
2482 .Case("za3h.s", AArch64::ZAS3)
2483 .Case("za0h.h", AArch64::ZAH0)
2484 .Case("za1h.h", AArch64::ZAH1)
2485 .Case("za0h.b", AArch64::ZAB0)
2486 .Case("za0v.q", AArch64::ZAQ0)
2487 .Case("za1v.q", AArch64::ZAQ1)
2488 .Case("za2v.q", AArch64::ZAQ2)
2489 .Case("za3v.q", AArch64::ZAQ3)
2490 .Case("za4v.q", AArch64::ZAQ4)
2491 .Case("za5v.q", AArch64::ZAQ5)
2492 .Case("za6v.q", AArch64::ZAQ6)
2493 .Case("za7v.q", AArch64::ZAQ7)
2494 .Case("za8v.q", AArch64::ZAQ8)
2495 .Case("za9v.q", AArch64::ZAQ9)
2496 .Case("za10v.q", AArch64::ZAQ10)
2497 .Case("za11v.q", AArch64::ZAQ11)
2498 .Case("za12v.q", AArch64::ZAQ12)
2499 .Case("za13v.q", AArch64::ZAQ13)
2500 .Case("za14v.q", AArch64::ZAQ14)
2501 .Case("za15v.q", AArch64::ZAQ15)
2502 .Case("za0v.d", AArch64::ZAD0)
2503 .Case("za1v.d", AArch64::ZAD1)
2504 .Case("za2v.d", AArch64::ZAD2)
2505 .Case("za3v.d", AArch64::ZAD3)
2506 .Case("za4v.d", AArch64::ZAD4)
2507 .Case("za5v.d", AArch64::ZAD5)
2508 .Case("za6v.d", AArch64::ZAD6)
2509 .Case("za7v.d", AArch64::ZAD7)
2510 .Case("za0v.s", AArch64::ZAS0)
2511 .Case("za1v.s", AArch64::ZAS1)
2512 .Case("za2v.s", AArch64::ZAS2)
2513 .Case("za3v.s", AArch64::ZAS3)
2514 .Case("za0v.h", AArch64::ZAH0)
2515 .Case("za1v.h", AArch64::ZAH1)
2516 .Case("za0v.b", AArch64::ZAB0)
2517 .Default(0);
2518}
2519
2520bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2521 SMLoc &EndLoc) {
2522 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
25
Calling 'AArch64AsmParser::tryParseRegister'
31
Returning from 'AArch64AsmParser::tryParseRegister'
32
Returning without writing to 'RegNo'
2523}
2524
2525OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2526 SMLoc &StartLoc,
2527 SMLoc &EndLoc) {
2528 StartLoc = getLoc();
2529 auto Res = tryParseScalarRegister(RegNo);
26
Calling 'AArch64AsmParser::tryParseScalarRegister'
29
Returning from 'AArch64AsmParser::tryParseScalarRegister'
2530 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2531 return Res;
30
Returning without writing to 'RegNo'
2532}
2533
2534// Matches a register name or register alias previously defined by '.req'
2535unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2536 RegKind Kind) {
2537 unsigned RegNum = 0;
2538 if ((RegNum = matchSVEDataVectorRegName(Name)))
2539 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2540
2541 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2542 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2543
2544 if ((RegNum = MatchNeonVectorRegName(Name)))
2545 return Kind == RegKind::NeonVector ? RegNum : 0;
2546
2547 if ((RegNum = matchMatrixRegName(Name)))
2548 return Kind == RegKind::Matrix ? RegNum : 0;
2549
2550 // The parsed register must be of RegKind Scalar
2551 if ((RegNum = MatchRegisterName(Name)))
2552 return Kind == RegKind::Scalar ? RegNum : 0;
2553
2554 if (!RegNum) {
2555 // Handle a few common aliases of registers.
2556 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2557 .Case("fp", AArch64::FP)
2558 .Case("lr", AArch64::LR)
2559 .Case("x31", AArch64::XZR)
2560 .Case("w31", AArch64::WZR)
2561 .Default(0))
2562 return Kind == RegKind::Scalar ? RegNum : 0;
2563
2564 // Check for aliases registered via .req. Canonicalize to lower case.
2565 // That's more consistent since register names are case insensitive, and
2566 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2567 auto Entry = RegisterReqs.find(Name.lower());
2568 if (Entry == RegisterReqs.end())
2569 return 0;
2570
2571 // set RegNum if the match is the right kind of register
2572 if (Kind == Entry->getValue().first)
2573 RegNum = Entry->getValue().second;
2574 }
2575 return RegNum;
2576}
2577
2578/// tryParseScalarRegister - Try to parse a register name. The token must be an
2579/// Identifier when called, and if it is a register name the token is eaten and
2580/// the register is added to the operand list.
2581OperandMatchResultTy
2582AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2583 MCAsmParser &Parser = getParser();
2584 const AsmToken &Tok = Parser.getTok();
2585 if (Tok.isNot(AsmToken::Identifier))
27
Taking true branch
2586 return MatchOperand_NoMatch;
28
Returning without writing to 'RegNum'
2587
2588 std::string lowerCase = Tok.getString().lower();
2589 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2590 if (Reg == 0)
2591 return MatchOperand_NoMatch;
2592
2593 RegNum = Reg;
2594 Parser.Lex(); // Eat identifier token.
2595 return MatchOperand_Success;
2596}
2597
2598/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2599OperandMatchResultTy
2600AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2601 MCAsmParser &Parser = getParser();
2602 SMLoc S = getLoc();
2603
2604 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2605 Error(S, "Expected cN operand where 0 <= N <= 15");
2606 return MatchOperand_ParseFail;
2607 }
2608
2609 StringRef Tok = Parser.getTok().getIdentifier();
2610 if (Tok[0] != 'c' && Tok[0] != 'C') {
2611 Error(S, "Expected cN operand where 0 <= N <= 15");
2612 return MatchOperand_ParseFail;
2613 }
2614
2615 uint32_t CRNum;
2616 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2617 if (BadNum || CRNum > 15) {
2618 Error(S, "Expected cN operand where 0 <= N <= 15");
2619 return MatchOperand_ParseFail;
2620 }
2621
2622 Parser.Lex(); // Eat identifier token.
2623 Operands.push_back(
2624 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2625 return MatchOperand_Success;
2626}
2627
2628/// tryParsePrefetch - Try to parse a prefetch operand.
2629template <bool IsSVEPrefetch>
2630OperandMatchResultTy
2631AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2632 MCAsmParser &Parser = getParser();
2633 SMLoc S = getLoc();
2634 const AsmToken &Tok = Parser.getTok();
2635
2636 auto LookupByName = [](StringRef N) {
2637 if (IsSVEPrefetch) {
2638 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2639 return Optional<unsigned>(Res->Encoding);
2640 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2641 return Optional<unsigned>(Res->Encoding);
2642 return Optional<unsigned>();
2643 };
2644
2645 auto LookupByEncoding = [](unsigned E) {
2646 if (IsSVEPrefetch) {
2647 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2648 return Optional<StringRef>(Res->Name);
2649 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2650 return Optional<StringRef>(Res->Name);
2651 return Optional<StringRef>();
2652 };
2653 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2654
2655 // Either an identifier for named values or a 5-bit immediate.
2656 // Eat optional hash.
2657 if (parseOptionalToken(AsmToken::Hash) ||
2658 Tok.is(AsmToken::Integer)) {
2659 const MCExpr *ImmVal;
2660 if (getParser().parseExpression(ImmVal))
2661 return MatchOperand_ParseFail;
2662
2663 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2664 if (!MCE) {
2665 TokError("immediate value expected for prefetch operand");
2666 return MatchOperand_ParseFail;
2667 }
2668 unsigned prfop = MCE->getValue();
2669 if (prfop > MaxVal) {
2670 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2671 "] expected");
2672 return MatchOperand_ParseFail;
2673 }
2674
2675 auto PRFM = LookupByEncoding(MCE->getValue());
2676 Operands.push_back(AArch64Operand::CreatePrefetch(
2677 prfop, PRFM.getValueOr(""), S, getContext()));
2678 return MatchOperand_Success;
2679 }
2680
2681 if (Tok.isNot(AsmToken::Identifier)) {
2682 TokError("prefetch hint expected");
2683 return MatchOperand_ParseFail;
2684 }
2685
2686 auto PRFM = LookupByName(Tok.getString());
2687 if (!PRFM) {
2688 TokError("prefetch hint expected");
2689 return MatchOperand_ParseFail;
2690 }
2691
2692 Operands.push_back(AArch64Operand::CreatePrefetch(
2693 *PRFM, Tok.getString(), S, getContext()));
2694 Parser.Lex(); // Eat identifier token.
2695 return MatchOperand_Success;
2696}
2697
2698/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2699OperandMatchResultTy
2700AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2701 MCAsmParser &Parser = getParser();
2702 SMLoc S = getLoc();
2703 const AsmToken &Tok = Parser.getTok();
2704 if (Tok.isNot(AsmToken::Identifier)) {
2705 TokError("invalid operand for instruction");
2706 return MatchOperand_ParseFail;
2707 }
2708
2709 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2710 if (!PSB) {
2711 TokError("invalid operand for instruction");
2712 return MatchOperand_ParseFail;
2713 }
2714
2715 Operands.push_back(AArch64Operand::CreatePSBHint(
2716 PSB->Encoding, Tok.getString(), S, getContext()));
2717 Parser.Lex(); // Eat identifier token.
2718 return MatchOperand_Success;
2719}
2720
2721/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2722OperandMatchResultTy
2723AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2724 MCAsmParser &Parser = getParser();
2725 SMLoc S = getLoc();
2726 const AsmToken &Tok = Parser.getTok();
2727 if (Tok.isNot(AsmToken::Identifier)) {
2728 TokError("invalid operand for instruction");
2729 return MatchOperand_ParseFail;
2730 }
2731
2732 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2733 if (!BTI) {
2734 TokError("invalid operand for instruction");
2735 return MatchOperand_ParseFail;
2736 }
2737
2738 Operands.push_back(AArch64Operand::CreateBTIHint(
2739 BTI->Encoding, Tok.getString(), S, getContext()));
2740 Parser.Lex(); // Eat identifier token.
2741 return MatchOperand_Success;
2742}
2743
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An optional '#' may precede the label expression.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  // Parse the label together with any @-style relocation modifier.
  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin @gotpage / @tlvppage references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2796
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getParser().getTok().is(AsmToken::LBrac))
    return MatchOperand_NoMatch;

  // An optional '#' may precede the label expression.
  if (getParser().getTok().is(AsmToken::Hash))
    getParser().Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      // Any explicit modifier is invalid for ADR (unlike ADRP).
      Error(S, "unexpected adr label");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
}
2833
/// tryParseFPImm - A floating point immediate expression operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  // Optional '#' before the literal.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may still be some other operand kind;
    // with one it can only be a (malformed) FP immediate.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // A hex literal is the raw 8-bit FP immediate encoding; it cannot be
    // negated and must fit in a byte.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Emit +0.0 as the two literal tokens "#0" ".0" so instruction forms
      // that take a literal zero can match.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2891
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No comma after the immediate: this is a plain (unshifted) immediate.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_insensitive("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2955
2956/// parseCondCodeString - Parse a Condition Code string.
2957AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2958 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2959 .Case("eq", AArch64CC::EQ)
2960 .Case("ne", AArch64CC::NE)
2961 .Case("cs", AArch64CC::HS)
2962 .Case("hs", AArch64CC::HS)
2963 .Case("cc", AArch64CC::LO)
2964 .Case("lo", AArch64CC::LO)
2965 .Case("mi", AArch64CC::MI)
2966 .Case("pl", AArch64CC::PL)
2967 .Case("vs", AArch64CC::VS)
2968 .Case("vc", AArch64CC::VC)
2969 .Case("hi", AArch64CC::HI)
2970 .Case("ls", AArch64CC::LS)
2971 .Case("ge", AArch64CC::GE)
2972 .Case("lt", AArch64CC::LT)
2973 .Case("gt", AArch64CC::GT)
2974 .Case("le", AArch64CC::LE)
2975 .Case("al", AArch64CC::AL)
2976 .Case("nv", AArch64CC::NV)
2977 .Default(AArch64CC::Invalid);
2978
2979 if (CC == AArch64CC::Invalid &&
2980 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2981 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2982 .Case("none", AArch64CC::EQ)
2983 .Case("any", AArch64CC::NE)
2984 .Case("nlast", AArch64CC::HS)
2985 .Case("last", AArch64CC::LO)
2986 .Case("first", AArch64CC::MI)
2987 .Case("nfrst", AArch64CC::PL)
2988 .Case("pmore", AArch64CC::HI)
2989 .Case("plast", AArch64CC::LS)
2990 .Case("tcont", AArch64CC::GE)
2991 .Case("tstop", AArch64CC::LT)
2992 .Default(AArch64CC::Invalid);
2993
2994 return CC;
2995}
2996
2997/// parseCondCode - Parse a Condition Code operand.
2998bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2999 bool invertCondCode) {
3000 MCAsmParser &Parser = getParser();
3001 SMLoc S = getLoc();
3002 const AsmToken &Tok = Parser.getTok();
3003 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")(static_cast <bool> (Tok.is(AsmToken::Identifier) &&
"Token is not an Identifier") ? void (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3003, __extension__ __PRETTY_FUNCTION__))
;
3004
3005 StringRef Cond = Tok.getString();
3006 AArch64CC::CondCode CC = parseCondCodeString(Cond);
3007 if (CC == AArch64CC::Invalid)
3008 return TokError("invalid condition code");
3009 Parser.Lex(); // Eat identifier token.
3010
3011 if (invertCondCode) {
3012 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3013 return TokError("condition codes AL and NV are invalid for this instruction");
3014 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3015 }
3016
3017 Operands.push_back(
3018 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3019 return false;
3020}
3021
3022OperandMatchResultTy
3023AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3024 MCAsmParser &Parser = getParser();
3025 const AsmToken &Tok = Parser.getTok();
3026 SMLoc S = getLoc();
3027
3028 if (Tok.isNot(AsmToken::Identifier)) {
3029 TokError("invalid operand for instruction");
3030 return MatchOperand_ParseFail;
3031 }
3032
3033 unsigned PStateImm = -1;
3034 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3035 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3036 PStateImm = SVCR->Encoding;
3037
3038 Operands.push_back(
3039 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3040 Parser.Lex(); // Eat identifier token.
3041 return MatchOperand_Success;
3042}
3043
/// Try to parse an SME matrix operand: either the whole "za" array or a
/// tile/row/column register such as "za0h.b".
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // "za" by itself names the whole array.
  if (Name.equals_insensitive("za")) {
    Parser.Lex(); // eat "za"
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
    return MatchOperand_Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return MatchOperand_NoMatch;

  // Matrix register names matched above always carry a '.' suffix.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();

  // A trailing 'h'/'v' before the '.' selects a row/column slice; anything
  // else names a whole tile.
  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes) {
    TokError("Expected the register to be followed by element width suffix");
    return MatchOperand_ParseFail;
  }
  unsigned ElementWidth = KindRes->second;

  Parser.Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }
  return MatchOperand_Success;
}
3104
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex(); // Eat the shift/extend keyword.

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // Shift operations ("lsl"/"lsr"/"asr"/"ror"/"msl") require an explicit
    // amount.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
3178
// Table of architecture-extension names and the subtarget feature bits each
// one enables; consulted when matching extension names and when reporting
// missing-feature diagnostics (see setRequiredFeatureString below).
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64", {AArch64::FeatureSMEF64}},
    {"sme-i64", {AArch64::FeatureSMEI64}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
3222
3223static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3224 if (FBS[AArch64::HasV8_1aOps])
3225 Str += "ARMv8.1a";
3226 else if (FBS[AArch64::HasV8_2aOps])
3227 Str += "ARMv8.2a";
3228 else if (FBS[AArch64::HasV8_3aOps])
3229 Str += "ARMv8.3a";
3230 else if (FBS[AArch64::HasV8_4aOps])
3231 Str += "ARMv8.4a";
3232 else if (FBS[AArch64::HasV8_5aOps])
3233 Str += "ARMv8.5a";
3234 else if (FBS[AArch64::HasV8_6aOps])
3235 Str += "ARMv8.6a";
3236 else if (FBS[AArch64::HasV8_7aOps])
3237 Str += "ARMv8.7a";
3238 else {
3239 SmallVector<std::string, 2> ExtMatches;
3240 for (const auto& Ext : ExtensionMap) {
3241 // Use & in case multiple features are enabled
3242 if ((FBS & Ext.Features) != FeatureBitset())
3243 ExtMatches.push_back(Ext.Name);
3244 }
3245 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3246 }
3247}
3248
/// Expand a packed SYS-alias encoding into the four explicit SYS operands.
/// The 14-bit encoding is laid out as: op1 in bits [13:11], Cn in [10:7],
/// Cm in [6:3], op2 in [2:0].
void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
                                   SMLoc S) {
  const uint16_t Op2 = Encoding & 7;
  const uint16_t Cm = (Encoding & 0x78) >> 3;
  const uint16_t Cn = (Encoding & 0x780) >> 7;
  const uint16_t Op1 = (Encoding & 0x3800) >> 11;

  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());

  // Push in SYS operand order: op1, Cn, Cm, op2.
  Operands.push_back(
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
  Operands.push_back(
      AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
  Operands.push_back(
      AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
  Expr = MCConstantExpr::create(Op2, getContext());
  Operands.push_back(
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
}
3268
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // The alias is rewritten to a real SYS instruction.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Each mnemonic has its own operand-name table; a hit that lacks the
  // required subtarget features produces a "requires: ..." diagnostic.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The prediction-restriction mnemonic selects the op2 field.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Operand names containing "all" operate on everything and take no
  // register; every other operand requires one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3366
/// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either a
/// 4-bit immediate or a named barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the token so we can push it back for the nXS re-parse below.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical name when the encoding has one.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3439
/// Parse the operand of a v8.7-A "dsb ... nXS" barrier: either one of the
/// immediates 16/20/24/28 or a named nXS barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // The range check above guarantees the lookup succeeds, so DB is non-null.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3494
/// Parse a system-register operand for MRS/MSR. Named registers and generic
/// "s<op0>_<op1>_<Cn>_<Cm>_<op2>" spellings are both accepted.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // SVCR names are handled by tryParseSVCR, not as generic system registers.
  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
    return MatchOperand_NoMatch;

  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    // A named register may be readable, writeable or both; -1 marks the
    // direction(s) it does not support.
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  // The same identifier may additionally name a PSTATE field for MSR.
  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier
  return MatchOperand_Success;
}
3526
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true on failure (note: inverted sense vs. OperandMatchResultTy).
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Validate the ".8b"/".4s"/... arrangement suffix and get its lane width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // An optional "[<imm>]" lane index may follow.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3559
3560OperandMatchResultTy
3561AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3562 SMLoc SIdx = getLoc();
3563 if (parseOptionalToken(AsmToken::LBrac)) {
3564 const MCExpr *ImmVal;
3565 if (getParser().parseExpression(ImmVal))
3566 return MatchOperand_NoMatch;
3567 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3568 if (!MCE) {
3569 TokError("immediate value expected for vector index");
3570 return MatchOperand_ParseFail;;
3571 }
3572
3573 SMLoc E = getLoc();
3574
3575 if (parseToken(AsmToken::RBrac, "']' expected"))
3576 return MatchOperand_ParseFail;;
3577
3578 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3579 E, getContext()));
3580 return MatchOperand_Success;
3581 }
3582
3583 return MatchOperand_NoMatch;
3584}
3585
3586// tryParseVectorRegister - Try to parse a vector register name with
3587// optional kind specifier. If it is a register specifier, eat the token
3588// and return it.
3589OperandMatchResultTy
3590AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3591 RegKind MatchKind) {
3592 MCAsmParser &Parser = getParser();
3593 const AsmToken &Tok = Parser.getTok();
3594
3595 if (Tok.isNot(AsmToken::Identifier))
3596 return MatchOperand_NoMatch;
3597
3598 StringRef Name = Tok.getString();
3599 // If there is a kind specifier, it's separated from the register name by
3600 // a '.'.
3601 size_t Start = 0, Next = Name.find('.');
3602 StringRef Head = Name.slice(Start, Next);
3603 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3604
3605 if (RegNum) {
3606 if (Next != StringRef::npos) {
3607 Kind = Name.slice(Next, StringRef::npos);
3608 if (!isValidVectorKind(Kind, MatchKind)) {
3609 TokError("invalid vector kind qualifier");
3610 return MatchOperand_ParseFail;
3611 }
3612 }
3613 Parser.Lex(); // Eat the register token.
3614
3615 Reg = RegNum;
3616 return MatchOperand_Success;
3617 }
3618
3619 return MatchOperand_NoMatch;
3620}
3621
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// i.e. a P register with an optional element-size suffix, an optional
/// "[...]" index, and an optional "/m" or "/z" predication qualifier.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  // An unparseable suffix means this wasn't an SVE predicate after all.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // Indexed predicate, there's no comma so try parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3681
3682/// parseRegister - Parse a register operand.
3683bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3684 // Try for a Neon vector register.
3685 if (!tryParseNeonVectorRegister(Operands))
3686 return false;
3687
3688 // Otherwise try for a scalar register.
3689 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3690 return false;
3691
3692 return true;
3693}
3694
/// Parse an immediate expression that may carry a leading ":modifier:"
/// relocation specifier (e.g. ":lo12:sym"). When a modifier is present the
/// parsed expression is wrapped in an AArch64MCExpr of the matching variant
/// kind.
/// \returns true on error, false on success (result in \p ImmVal).
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Relocation specifiers are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    // The specifier is terminated by a second ':'.
    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // RefKind is only assigned when HasELFModifier is set, so it is read
  // only on that path.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3773
3774template <RegKind VectorKind>
3775OperandMatchResultTy
3776AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3777 bool ExpectMatch) {
3778 MCAsmParser &Parser = getParser();
3779 if (!Parser.getTok().is(AsmToken::LCurly))
3780 return MatchOperand_NoMatch;
3781
3782 // Wrapper around parse function
3783 auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3784 bool NoMatchIsError) {
3785 auto RegTok = Parser.getTok();
3786 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3787 if (ParseRes == MatchOperand_Success) {
3788 if (parseVectorKind(Kind, VectorKind))
3789 return ParseRes;
3790 llvm_unreachable("Expected a valid vector kind")::llvm::llvm_unreachable_internal("Expected a valid vector kind"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3790)
;
3791 }
3792
3793 if (RegTok.isNot(AsmToken::Identifier) ||
3794 ParseRes == MatchOperand_ParseFail ||
3795 (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
3796 !RegTok.getString().startswith_insensitive("za"))) {
3797 Error(Loc, "vector register expected");
3798 return MatchOperand_ParseFail;
3799 }
3800
3801 return MatchOperand_NoMatch;
3802 };
3803
3804 SMLoc S = getLoc();
3805 auto LCurly = Parser.getTok();
3806 Parser.Lex(); // Eat left bracket token.
3807
3808 StringRef Kind;
3809 unsigned FirstReg;
3810 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3811
3812 // Put back the original left bracket if there was no match, so that
3813 // different types of list-operands can be matched (e.g. SVE, Neon).
3814 if (ParseRes == MatchOperand_NoMatch)
3815 Parser.getLexer().UnLex(LCurly);
3816
3817 if (ParseRes != MatchOperand_Success)
3818 return ParseRes;
3819
3820 int64_t PrevReg = FirstReg;
3821 unsigned Count = 1;
3822
3823 if (parseOptionalToken(AsmToken::Minus)) {
3824 SMLoc Loc = getLoc();
3825 StringRef NextKind;
3826
3827 unsigned Reg;
3828 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3829 if (ParseRes != MatchOperand_Success)
3830 return ParseRes;
3831
3832 // Any Kind suffices must match on all regs in the list.
3833 if (Kind != NextKind) {
3834 Error(Loc, "mismatched register size suffix");
3835 return MatchOperand_ParseFail;
3836 }
3837
3838 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3839
3840 if (Space == 0 || Space > 3) {
3841 Error(Loc, "invalid number of vectors");
3842 return MatchOperand_ParseFail;
3843 }
3844
3845 Count += Space;
3846 }
3847 else {
3848 while (parseOptionalToken(AsmToken::Comma)) {
3849 SMLoc Loc = getLoc();
3850 StringRef NextKind;
3851 unsigned Reg;
3852 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3853 if (ParseRes != MatchOperand_Success)
3854 return ParseRes;
3855
3856 // Any Kind suffices must match on all regs in the list.
3857 if (Kind != NextKind) {
3858 Error(Loc, "mismatched register size suffix");
3859 return MatchOperand_ParseFail;
3860 }
3861
3862 // Registers must be incremental (with wraparound at 31)
3863 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3864 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3865 Error(Loc, "registers must be sequential");
3866 return MatchOperand_ParseFail;
3867 }
3868
3869 PrevReg = Reg;
3870 ++Count;
3871 }
3872 }
3873
3874 if (parseToken(AsmToken::RCurly, "'}' expected"))
3875 return MatchOperand_ParseFail;
3876
3877 if (Count > 4) {
3878 Error(S, "invalid number of vectors");
3879 return MatchOperand_ParseFail;
3880 }
3881
3882 unsigned NumElements = 0;
3883 unsigned ElementWidth = 0;
3884 if (!Kind.empty()) {
3885 if (const auto &VK = parseVectorKind(Kind, VectorKind))
3886 std::tie(NumElements, ElementWidth) = *VK;
3887 }
3888
3889 Operands.push_back(AArch64Operand::CreateVectorList(
3890 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3891 getContext()));
3892
3893 return MatchOperand_Success;
3894}
3895
3896/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3897bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3898 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3899 if (ParseRes != MatchOperand_Success)
3900 return true;
3901
3902 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3903}
3904
3905OperandMatchResultTy
3906AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3907 SMLoc StartLoc = getLoc();
3908
3909 unsigned RegNum;
3910 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3911 if (Res != MatchOperand_Success)
3912 return Res;
3913
3914 if (!parseOptionalToken(AsmToken::Comma)) {
3915 Operands.push_back(AArch64Operand::CreateReg(
3916 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3917 return MatchOperand_Success;
3918 }
3919
3920 parseOptionalToken(AsmToken::Hash);
3921
3922 if (getParser().getTok().isNot(AsmToken::Integer)) {
3923 Error(getLoc(), "index must be absent or #0");
3924 return MatchOperand_ParseFail;
3925 }
3926
3927 const MCExpr *ImmVal;
3928 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3929 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3930 Error(getLoc(), "index must be absent or #0");
3931 return MatchOperand_ParseFail;
3932 }
3933
3934 Operands.push_back(AArch64Operand::CreateReg(
3935 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3936 return MatchOperand_Success;
3937}
3938
3939template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3940OperandMatchResultTy
3941AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3942 SMLoc StartLoc = getLoc();
3943
3944 unsigned RegNum;
3945 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3946 if (Res != MatchOperand_Success)
3947 return Res;
3948
3949 // No shift/extend is the default.
3950 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3951 Operands.push_back(AArch64Operand::CreateReg(
3952 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3953 return MatchOperand_Success;
3954 }
3955
3956 // Eat the comma
3957 getParser().Lex();
3958
3959 // Match the shift
3960 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3961 Res = tryParseOptionalShiftExtend(ExtOpnd);
3962 if (Res != MatchOperand_Success)
3963 return Res;
3964
3965 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3966 Operands.push_back(AArch64Operand::CreateReg(
3967 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3968 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3969 Ext->hasShiftExtendAmount()));
3970
3971 return MatchOperand_Success;
3972}
3973
3974bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3975 MCAsmParser &Parser = getParser();
3976
3977 // Some SVE instructions have a decoration after the immediate, i.e.
3978 // "mul vl". We parse them here and add tokens, which must be present in the
3979 // asm string in the tablegen instruction.
3980 bool NextIsVL =
3981 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
3982 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3983 if (!Parser.getTok().getString().equals_insensitive("mul") ||
3984 !(NextIsVL || NextIsHash))
3985 return true;
3986
3987 Operands.push_back(
3988 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3989 Parser.Lex(); // Eat the "mul"
3990
3991 if (NextIsVL) {
3992 Operands.push_back(
3993 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3994 Parser.Lex(); // Eat the "vl"
3995 return false;
3996 }
3997
3998 if (NextIsHash) {
3999 Parser.Lex(); // Eat the #
4000 SMLoc S = getLoc();
4001
4002 // Parse immediate operand.
4003 const MCExpr *ImmVal;
4004 if (!Parser.parseExpression(ImmVal))
4005 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4006 Operands.push_back(AArch64Operand::CreateImm(
4007 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4008 getContext()));
4009 return MatchOperand_Success;
4010 }
4011 }
4012
4013 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4014}
4015
4016bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4017 MCAsmParser &Parser = getParser();
4018 auto Tok = Parser.getTok();
4019 if (Tok.isNot(AsmToken::Identifier))
4020 return true;
4021
4022 auto Keyword = Tok.getString();
4023 Keyword = StringSwitch<StringRef>(Keyword.lower())
4024 .Case("sm", "sm")
4025 .Case("za", "za")
4026 .Default(Keyword);
4027 Operands.push_back(
4028 AArch64Operand::CreateToken(Keyword, false, Tok.getLoc(), getContext()));
4029
4030 Parser.Lex();
4031 return false;
4032}
4033
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
/// \param isCondCode      expect a condition-code operand here.
/// \param invertCondCode  invert the parsed condition code (used for the
///                        aliased conditional-select forms).
/// \returns true on error, false on success (operands pushed).
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // Give the tablegen'd custom operand parsers first crack at the stream.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a symbolic immediate expression
    // (optionally carrying a ":modifier:" prefix).
    SMLoc S = getLoc();  // NOTE: shadows the outer 'S'.
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // Try a Neon vector list first; if that succeeds we are done.
    if (!parseNeonVectorList(Operands))
      return false;

    // Otherwise emit the '{' as a literal token (e.g. for SME groups such
    // as '{ ZA }') and keep parsing.
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateToken("{", false, Loc, getContext()));
    Parser.Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is an "smstart" or "smstop" instruction, parse its special
    // keyword operand as an identifier.
    if (Mnemonic == "smstart" || Mnemonic == "smstop")
      return parseKeywordOperand(Operands);

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emitted as two raw tokens so the matcher sees literal "#0" ".0".
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // NOTE(review): 'S' is never assigned on this path, so the immediates
    // below carry a default-constructed start location — confirm intended.
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize the constant to a 16-bit chunk plus an LSL #16 multiple.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                      ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4226
4227bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4228 const MCExpr *Expr = nullptr;
4229 SMLoc L = getLoc();
4230 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4231 return true;
4232 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4233 if (check(!Value, L, "expected constant expression"))
4234 return true;
4235 Out = Value->getValue();
4236 return false;
4237}
4238
4239bool AArch64AsmParser::parseComma() {
4240 if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
4241 "expected comma"))
4242 return true;
4243 // Eat the comma
4244 getParser().Lex();
4245 return false;
4246}
4247
4248bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4249 unsigned First, unsigned Last) {
4250 unsigned Reg;
23
'Reg' declared without an initial value
4251 SMLoc Start, End;
4252 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
24
Calling 'AArch64AsmParser::ParseRegister'
33
Returning from 'AArch64AsmParser::ParseRegister'
34
Assuming the condition is false
35
Taking false branch
4253 return true;
4254
4255 // Special handling for FP and LR; they aren't linearly after x28 in
4256 // the registers enum.
4257 unsigned RangeEnd = Last;
4258 if (Base
35.1
'Base' is equal to X0
== AArch64::X0) {
36
Taking true branch
4259 if (Last
36.1
'Last' is not equal to FP
== AArch64::FP) {
37
Taking false branch
4260 RangeEnd = AArch64::X28;
4261 if (Reg == AArch64::FP) {
4262 Out = 29;
4263 return false;
4264 }
4265 }
4266 if (Last
37.1
'Last' is equal to LR
== AArch64::LR) {
38
Taking true branch
4267 RangeEnd = AArch64::X28;
4268 if (Reg == AArch64::FP) {
39
The left operand of '==' is a garbage value
4269 Out = 29;
4270 return false;
4271 } else if (Reg == AArch64::LR) {
4272 Out = 30;
4273 return false;
4274 }
4275 }
4276 }
4277
4278 if (check(Reg < First || Reg > RangeEnd, Start,
4279 Twine("expected register in range ") +
4280 AArch64InstPrinter::getRegisterName(First) + " to " +
4281 AArch64InstPrinter::getRegisterName(Last)))
4282 return true;
4283 Out = Reg - Base;
4284 return false;
4285}
4286
4287bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4288 const MCParsedAsmOperand &Op2) const {
4289 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4290 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4291 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4292 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4293 return MCTargetAsmParser::regsEqual(Op1, Op2);
4294
4295 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4296, __extension__ __PRETTY_FUNCTION__))
4296 "Testing equality of non-scalar registers not supported")(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4296, __extension__ __PRETTY_FUNCTION__))
;
4297
4298 // Check if a registers match their sub/super register classes.
4299 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4300 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4301 if (AOp1.getRegEqualityTy() == EqualsSubReg)
4302 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4303 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4304 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4305 if (AOp2.getRegEqualityTy() == EqualsSubReg)
4306 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4307
4308 return false;
4309}
4310
4311/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
4312/// operands.
4313bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
4314 StringRef Name, SMLoc NameLoc,
4315 OperandVector &Operands) {
4316 MCAsmParser &Parser = getParser();
4317 Name = StringSwitch<StringRef>(Name.lower())
4318 .Case("beq", "b.eq")
4319 .Case("bne", "b.ne")
4320 .Case("bhs", "b.hs")
4321 .Case("bcs", "b.cs")
4322 .Case("blo", "b.lo")
4323 .Case("bcc", "b.cc")
4324 .Case("bmi", "b.mi")
4325 .Case("bpl", "b.pl")
4326 .Case("bvs", "b.vs")
4327 .Case("bvc", "b.vc")
4328 .Case("bhi", "b.hi")
4329 .Case("bls", "b.ls")
4330 .Case("bge", "b.ge")
4331 .Case("blt", "b.lt")
4332 .Case("bgt", "b.gt")
4333 .Case("ble", "b.le")
4334 .Case("bal", "b.al")
4335 .Case("bnv", "b.nv")
4336 .Default(Name);
4337
4338 // First check for the AArch64-specific .req directive.
4339 if (Parser.getTok().is(AsmToken::Identifier) &&
4340 Parser.getTok().getIdentifier().lower() == ".req") {
4341 parseDirectiveReq(Name, NameLoc);
4342 // We always return 'error' for this, as we're done with this
4343 // statement and don't need to match the 'instruction."
4344 return true;
4345 }
4346
4347 // Create the leading tokens for the mnemonic, split by '.' characters.
4348 size_t Start = 0, Next = Name.find('.');
4349 StringRef Head = Name.slice(Start, Next);
4350
4351 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4352 // the SYS instruction.
4353 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4354 Head == "cfp" || Head == "dvp" || Head == "cpp")
4355 return parseSysAlias(Head, NameLoc, Operands);
4356
4357 Operands.push_back(
4358 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
4359 Mnemonic = Head;
4360
4361 // Handle condition codes for a branch mnemonic
4362 if (Head == "b" && Next != StringRef::npos) {
4363 Start = Next;
4364 Next = Name.find('.', Start + 1);
4365 Head = Name.slice(Start + 1, Next);
4366
4367 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4368 (Head.data() - Name.data()));
4369 AArch64CC::CondCode CC = parseCondCodeString(Head);
4370 if (CC == AArch64CC::Invalid)
4371 return Error(SuffixLoc, "invalid condition code");
4372 Operands.push_back(
4373 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
4374 Operands.push_back(
4375 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4376 }
4377
4378 // Add the remaining tokens in the mnemonic.
4379 while (Next != StringRef::npos) {
4380 Start = Next;
4381 Next = Name.find('.', Start + 1);
4382 Head = Name.slice(Start, Next);
4383 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4384 (Head.data() - Name.data()) + 1);
4385 Operands.push_back(
4386 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
4387 }
4388
4389 // Conditional compare instructions have a Condition Code operand, which needs
4390 // to be parsed and an immediate operand created.
4391 bool condCodeFourthOperand =
4392 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4393 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4394 Head == "csinc" || Head == "csinv" || Head == "csneg");
4395
4396 // These instructions are aliases to some of the conditional select
4397 // instructions. However, the condition code is inverted in the aliased
4398 // instruction.
4399 //
4400 // FIXME: Is this the correct way to handle these? Or should the parser
4401 // generate the aliased instructions directly?
4402 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4403 bool condCodeThirdOperand =
4404 (Head == "cinc" || Head == "cinv" || Head == "cneg");
4405
4406 // Read the remaining operands.
4407 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4408
4409 unsigned N = 1;
4410 do {
4411 // Parse and remember the operand.
4412 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4413 (N == 3 && condCodeThirdOperand) ||
4414 (N == 2 && condCodeSecondOperand),
4415 condCodeSecondOperand || condCodeThirdOperand)) {
4416 return true;
4417 }
4418
4419 // After successfully parsing some operands there are three special cases
4420 // to consider (i.e. notional operands not separated by commas). Two are
4421 // due to memory specifiers:
4422 // + An RBrac will end an address for load/store/prefetch
4423 // + An '!' will indicate a pre-indexed operation.
4424 //
4425 // And a further case is '}', which ends a group of tokens specifying the
4426 // SME accumulator array 'ZA' or tile vector, i.e.
4427 //
4428 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
4429 //
4430 // It's someone else's responsibility to make sure these tokens are sane
4431 // in the given context!
4432
4433 if (parseOptionalToken(AsmToken::RBrac))
4434 Operands.push_back(
4435 AArch64Operand::CreateToken("]", false, getLoc(), getContext()));
4436 if (parseOptionalToken(AsmToken::Exclaim))
4437 Operands.push_back(
4438 AArch64Operand::CreateToken("!", false, getLoc(), getContext()));
4439 if (parseOptionalToken(AsmToken::RCurly))
4440 Operands.push_back(
4441 AArch64Operand::CreateToken("}", false, getLoc(), getContext()));
4442
4443 ++N;
4444 } while (parseOptionalToken(AsmToken::Comma));
4445 }
4446
4447 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4448 return true;
4449
4450 return false;
4451}
4452
4453static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4454 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(static_cast <bool> ((ZReg >= AArch64::Z0) &&
(ZReg <= AArch64::Z31)) ? void (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4454, __extension__ __PRETTY_FUNCTION__))
;
4455 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4456 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4457 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4458 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4459 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4460 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4461}
4462
4463// FIXME: This entire function is a giant hack to provide us with decent
4464// operand range validation/diagnostics until TableGen/MC can be extended
4465// to support autogeneration of this kind of validation.
4466bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4467 SmallVectorImpl<SMLoc> &Loc) {
4468 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4469 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4470
4471 // A prefix only applies to the instruction following it. Here we extract
4472 // prefix information for the next instruction before validating the current
4473 // one so that in the case of failure we don't erronously continue using the
4474 // current prefix.
4475 PrefixInfo Prefix = NextPrefix;
4476 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4477
4478 // Before validating the instruction in isolation we run through the rules
4479 // applicable when it follows a prefix instruction.
4480 // NOTE: brk & hlt can be prefixed but require no additional validation.
4481 if (Prefix.isActive() &&
4482 (Inst.getOpcode() != AArch64::BRK) &&
4483 (Inst.getOpcode() != AArch64::HLT)) {
4484
4485 // Prefixed intructions must have a destructive operand.
4486 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4487 AArch64::NotDestructive)
4488 return Error(IDLoc, "instruction is unpredictable when following a"
4489 " movprfx, suggest replacing movprfx with mov");
4490
4491 // Destination operands must match.
4492 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4493 return Error(Loc[0], "instruction is unpredictable when following a"
4494 " movprfx writing to a different destination");
4495
4496 // Destination operand must not be used in any other location.
4497 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4498 if (Inst.getOperand(i).isReg() &&
4499 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4500 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4501 return Error(Loc[0], "instruction is unpredictable when following a"
4502 " movprfx and destination also used as non-destructive"
4503 " source");
4504 }
4505
4506 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4507 if (Prefix.isPredicated()) {
4508 int PgIdx = -1;
4509
4510 // Find the instructions general predicate.
4511 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4512 if (Inst.getOperand(i).isReg() &&
4513 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4514 PgIdx = i;
4515 break;
4516 }
4517
4518 // Instruction must be predicated if the movprfx is predicated.
4519 if (PgIdx == -1 ||
4520 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4521 return Error(IDLoc, "instruction is unpredictable when following a"
4522 " predicated movprfx, suggest using unpredicated movprfx");
4523
4524 // Instruction must use same general predicate as the movprfx.
4525 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4526 return Error(IDLoc, "instruction is unpredictable when following a"
4527 " predicated movprfx using a different general predicate");
4528
4529 // Instruction element type must match the movprfx.
4530 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4531 return Error(IDLoc, "instruction is unpredictable when following a"
4532 " predicated movprfx with a different element size");
4533 }
4534 }
4535
4536 // Check for indexed addressing modes w/ the base register being the
4537 // same as a destination/source register or pair load where
4538 // the Rt == Rt2. All of those are undefined behaviour.
4539 switch (Inst.getOpcode()) {
4540 case AArch64::LDPSWpre:
4541 case AArch64::LDPWpost:
4542 case AArch64::LDPWpre:
4543 case AArch64::LDPXpost:
4544 case AArch64::LDPXpre: {
4545 unsigned Rt = Inst.getOperand(1).getReg();
4546 unsigned Rt2 = Inst.getOperand(2).getReg();
4547 unsigned Rn = Inst.getOperand(3).getReg();
4548 if (RI->isSubRegisterEq(Rn, Rt))
4549 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4550 "is also a destination");
4551 if (RI->isSubRegisterEq(Rn, Rt2))
4552 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4553 "is also a destination");
4554 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4555 }
4556 case AArch64::LDPDi:
4557 case AArch64::LDPQi:
4558 case AArch64::LDPSi:
4559 case AArch64::LDPSWi:
4560 case AArch64::LDPWi:
4561 case AArch64::LDPXi: {
4562 unsigned Rt = Inst.getOperand(0).getReg();
4563 unsigned Rt2 = Inst.getOperand(1).getReg();
4564 if (Rt == Rt2)
4565 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4566 break;
4567 }
4568 case AArch64::LDPDpost:
4569 case AArch64::LDPDpre:
4570 case AArch64::LDPQpost:
4571 case AArch64::LDPQpre:
4572 case AArch64::LDPSpost:
4573 case AArch64::LDPSpre:
4574 case AArch64::LDPSWpost: {
4575 unsigned Rt = Inst.getOperand(1).getReg();
4576 unsigned Rt2 = Inst.getOperand(2).getReg();
4577 if (Rt == Rt2)
4578 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4579 break;
4580 }
4581 case AArch64::STPDpost:
4582 case AArch64::STPDpre:
4583 case AArch64::STPQpost:
4584 case AArch64::STPQpre:
4585 case AArch64::STPSpost:
4586 case AArch64::STPSpre:
4587 case AArch64::STPWpost:
4588 case AArch64::STPWpre:
4589 case AArch64::STPXpost:
4590 case AArch64::STPXpre: {
4591 unsigned Rt = Inst.getOperand(1).getReg();
4592 unsigned Rt2 = Inst.getOperand(2).getReg();
4593 unsigned Rn = Inst.getOperand(3).getReg();
4594 if (RI->isSubRegisterEq(Rn, Rt))
4595 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4596 "is also a source");
4597 if (RI->isSubRegisterEq(Rn, Rt2))
4598 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4599 "is also a source");
4600 break;
4601 }
4602 case AArch64::LDRBBpre:
4603 case AArch64::LDRBpre:
4604 case AArch64::LDRHHpre:
4605 case AArch64::LDRHpre:
4606 case AArch64::LDRSBWpre:
4607 case AArch64::LDRSBXpre:
4608 case AArch64::LDRSHWpre:
4609 case AArch64::LDRSHXpre:
4610 case AArch64::LDRSWpre:
4611 case AArch64::LDRWpre:
4612 case AArch64::LDRXpre:
4613 case AArch64::LDRBBpost:
4614 case AArch64::LDRBpost:
4615 case AArch64::LDRHHpost:
4616 case AArch64::LDRHpost:
4617 case AArch64::LDRSBWpost:
4618 case AArch64::LDRSBXpost:
4619 case AArch64::LDRSHWpost:
4620 case AArch64::LDRSHXpost:
4621 case AArch64::LDRSWpost:
4622 case AArch64::LDRWpost:
4623 case AArch64::LDRXpost: {
4624 unsigned Rt = Inst.getOperand(1).getReg();
4625 unsigned Rn = Inst.getOperand(2).getReg();
4626 if (RI->isSubRegisterEq(Rn, Rt))
4627 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4628 "is also a source");
4629 break;
4630 }
4631 case AArch64::STRBBpost:
4632 case AArch64::STRBpost:
4633 case AArch64::STRHHpost:
4634 case AArch64::STRHpost:
4635 case AArch64::STRWpost:
4636 case AArch64::STRXpost:
4637 case AArch64::STRBBpre:
4638 case AArch64::STRBpre:
4639 case AArch64::STRHHpre:
4640 case AArch64::STRHpre:
4641 case AArch64::STRWpre:
4642 case AArch64::STRXpre: {
4643 unsigned Rt = Inst.getOperand(1).getReg();
4644 unsigned Rn = Inst.getOperand(2).getReg();
4645 if (RI->isSubRegisterEq(Rn, Rt))
4646 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4647 "is also a source");
4648 break;
4649 }
4650 case AArch64::STXRB:
4651 case AArch64::STXRH:
4652 case AArch64::STXRW:
4653 case AArch64::STXRX:
4654 case AArch64::STLXRB:
4655 case AArch64::STLXRH:
4656 case AArch64::STLXRW:
4657 case AArch64::STLXRX: {
4658 unsigned Rs = Inst.getOperand(0).getReg();
4659 unsigned Rt = Inst.getOperand(1).getReg();
4660 unsigned Rn = Inst.getOperand(2).getReg();
4661 if (RI->isSubRegisterEq(Rt, Rs) ||
4662 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4663 return Error(Loc[0],
4664 "unpredictable STXR instruction, status is also a source");
4665 break;
4666 }
4667 case AArch64::STXPW:
4668 case AArch64::STXPX:
4669 case AArch64::STLXPW:
4670 case AArch64::STLXPX: {
4671 unsigned Rs = Inst.getOperand(0).getReg();
4672 unsigned Rt1 = Inst.getOperand(1).getReg();
4673 unsigned Rt2 = Inst.getOperand(2).getReg();
4674 unsigned Rn = Inst.getOperand(3).getReg();
4675 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4676 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4677 return Error(Loc[0],
4678 "unpredictable STXP instruction, status is also a source");
4679 break;
4680 }
4681 case AArch64::LDRABwriteback:
4682 case AArch64::LDRAAwriteback: {
4683 unsigned Xt = Inst.getOperand(0).getReg();
4684 unsigned Xn = Inst.getOperand(1).getReg();
4685 if (Xt == Xn)
4686 return Error(Loc[0],
4687 "unpredictable LDRA instruction, writeback base"
4688 " is also a destination");
4689 break;
4690 }
4691 }
4692
4693
4694 // Now check immediate ranges. Separate from the above as there is overlap
4695 // in the instructions being checked and this keeps the nested conditionals
4696 // to a minimum.
4697 switch (Inst.getOpcode()) {
4698 case AArch64::ADDSWri:
4699 case AArch64::ADDSXri:
4700 case AArch64::ADDWri:
4701 case AArch64::ADDXri:
4702 case AArch64::SUBSWri:
4703 case AArch64::SUBSXri:
4704 case AArch64::SUBWri:
4705 case AArch64::SUBXri: {
4706 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4707 // some slight duplication here.
4708 if (Inst.getOperand(2).isExpr()) {
4709 const MCExpr *Expr = Inst.getOperand(2).getExpr();
4710 AArch64MCExpr::VariantKind ELFRefKind;
4711 MCSymbolRefExpr::VariantKind DarwinRefKind;
4712 int64_t Addend;
4713 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4714
4715 // Only allow these with ADDXri.
4716 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4717 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4718 Inst.getOpcode() == AArch64::ADDXri)
4719 return false;
4720
4721 // Only allow these with ADDXri/ADDWri
4722 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4723 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4724 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4725 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4726 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4727 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4728 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4729 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4730 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4731 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4732 (Inst.getOpcode() == AArch64::ADDXri ||
4733 Inst.getOpcode() == AArch64::ADDWri))
4734 return false;
4735
4736 // Don't allow symbol refs in the immediate field otherwise
4737 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4738 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4739 // 'cmp w0, 'borked')
4740 return Error(Loc.back(), "invalid immediate expression");
4741 }
4742 // We don't validate more complex expressions here
4743 }
4744 return false;
4745 }
4746 default:
4747 return false;
4748 }
4749}
4750
4751static std::string AArch64MnemonicSpellCheck(StringRef S,
4752 const FeatureBitset &FBS,
4753 unsigned VariantID = 0);
4754
4755bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4756 uint64_t ErrorInfo,
4757 OperandVector &Operands) {
4758 switch (ErrCode) {
4759 case Match_InvalidTiedOperand: {
4760 RegConstraintEqualityTy EqTy =
4761 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4762 .getRegEqualityTy();
4763 switch (EqTy) {
4764 case RegConstraintEqualityTy::EqualsSubReg:
4765 return Error(Loc, "operand must be 64-bit form of destination register");
4766 case RegConstraintEqualityTy::EqualsSuperReg:
4767 return Error(Loc, "operand must be 32-bit form of destination register");
4768 case RegConstraintEqualityTy::EqualsReg:
4769 return Error(Loc, "operand must match destination register");
4770 }
4771 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4771)
;
4772 }
4773 case Match_MissingFeature:
4774 return Error(Loc,
4775 "instruction requires a CPU feature not currently enabled");
4776 case Match_InvalidOperand:
4777 return Error(Loc, "invalid operand for instruction");
4778 case Match_InvalidSuffix:
4779 return Error(Loc, "invalid type suffix for instruction");
4780 case Match_InvalidCondCode:
4781 return Error(Loc, "expected AArch64 condition code");
4782 case Match_AddSubRegExtendSmall:
4783 return Error(Loc,
4784 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4785 case Match_AddSubRegExtendLarge:
4786 return Error(Loc,
4787 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4788 case Match_AddSubSecondSource:
4789 return Error(Loc,
4790 "expected compatible register, symbol or integer in range [0, 4095]");
4791 case Match_LogicalSecondSource:
4792 return Error(Loc, "expected compatible register or logical immediate");
4793 case Match_InvalidMovImm32Shift:
4794 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4795 case Match_InvalidMovImm64Shift:
4796 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4797 case Match_AddSubRegShift32:
4798 return Error(Loc,
4799 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4800 case Match_AddSubRegShift64:
4801 return Error(Loc,
4802 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4803 case Match_InvalidFPImm:
4804 return Error(Loc,
4805 "expected compatible register or floating-point constant");
4806 case Match_InvalidMemoryIndexedSImm6:
4807 return Error(Loc, "index must be an integer in range [-32, 31].");
4808 case Match_InvalidMemoryIndexedSImm5:
4809 return Error(Loc, "index must be an integer in range [-16, 15].");
4810 case Match_InvalidMemoryIndexed1SImm4:
4811 return Error(Loc, "index must be an integer in range [-8, 7].");
4812 case Match_InvalidMemoryIndexed2SImm4:
4813 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4814 case Match_InvalidMemoryIndexed3SImm4:
4815 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4816 case Match_InvalidMemoryIndexed4SImm4:
4817 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4818 case Match_InvalidMemoryIndexed16SImm4:
4819 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4820 case Match_InvalidMemoryIndexed32SImm4:
4821 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4822 case Match_InvalidMemoryIndexed1SImm6:
4823 return Error(Loc, "index must be an integer in range [-32, 31].");
4824 case Match_InvalidMemoryIndexedSImm8:
4825 return Error(Loc, "index must be an integer in range [-128, 127].");
4826 case Match_InvalidMemoryIndexedSImm9:
4827 return Error(Loc, "index must be an integer in range [-256, 255].");
4828 case Match_InvalidMemoryIndexed16SImm9:
4829 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4830 case Match_InvalidMemoryIndexed8SImm10:
4831 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4832 case Match_InvalidMemoryIndexed4SImm7:
4833 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4834 case Match_InvalidMemoryIndexed8SImm7:
4835 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4836 case Match_InvalidMemoryIndexed16SImm7:
4837 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4838 case Match_InvalidMemoryIndexed8UImm5:
4839 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4840 case Match_InvalidMemoryIndexed4UImm5:
4841 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4842 case Match_InvalidMemoryIndexed2UImm5:
4843 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4844 case Match_InvalidMemoryIndexed8UImm6:
4845 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4846 case Match_InvalidMemoryIndexed16UImm6:
4847 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4848 case Match_InvalidMemoryIndexed4UImm6:
4849 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4850 case Match_InvalidMemoryIndexed2UImm6:
4851 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4852 case Match_InvalidMemoryIndexed1UImm6:
4853 return Error(Loc, "index must be in range [0, 63].");
4854 case Match_InvalidMemoryWExtend8:
4855 return Error(Loc,
4856 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4857 case Match_InvalidMemoryWExtend16:
4858 return Error(Loc,
4859 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4860 case Match_InvalidMemoryWExtend32:
4861 return Error(Loc,
4862 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4863 case Match_InvalidMemoryWExtend64:
4864 return Error(Loc,
4865 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4866 case Match_InvalidMemoryWExtend128:
4867 return Error(Loc,
4868 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4869 case Match_InvalidMemoryXExtend8:
4870 return Error(Loc,
4871 "expected 'lsl' or 'sxtx' with optional shift of #0");
4872 case Match_InvalidMemoryXExtend16:
4873 return Error(Loc,
4874 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4875 case Match_InvalidMemoryXExtend32:
4876 return Error(Loc,
4877 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4878 case Match_InvalidMemoryXExtend64:
4879 return Error(Loc,
4880 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4881 case Match_InvalidMemoryXExtend128:
4882 return Error(Loc,
4883 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4884 case Match_InvalidMemoryIndexed1:
4885 return Error(Loc, "index must be an integer in range [0, 4095].");
4886 case Match_InvalidMemoryIndexed2:
4887 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4888 case Match_InvalidMemoryIndexed4:
4889 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4890 case Match_InvalidMemoryIndexed8:
4891 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4892 case Match_InvalidMemoryIndexed16:
4893 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4894 case Match_InvalidImm0_1:
4895 return Error(Loc, "immediate must be an integer in range [0, 1].");
4896 case Match_InvalidImm0_3:
4897 return Error(Loc, "immediate must be an integer in range [0, 3].");
4898 case Match_InvalidImm0_7:
4899 return Error(Loc, "immediate must be an integer in range [0, 7].");
4900 case Match_InvalidImm0_15:
4901 return Error(Loc, "immediate must be an integer in range [0, 15].");
4902 case Match_InvalidImm0_31:
4903 return Error(Loc, "immediate must be an integer in range [0, 31].");
4904 case Match_InvalidImm0_63:
4905 return Error(Loc, "immediate must be an integer in range [0, 63].");
4906 case Match_InvalidImm0_127:
4907 return Error(Loc, "immediate must be an integer in range [0, 127].");
4908 case Match_InvalidImm0_255:
4909 return Error(Loc, "immediate must be an integer in range [0, 255].");
4910 case Match_InvalidImm0_65535:
4911 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4912 case Match_InvalidImm1_8:
4913 return Error(Loc, "immediate must be an integer in range [1, 8].");
4914 case Match_InvalidImm1_16:
4915 return Error(Loc, "immediate must be an integer in range [1, 16].");
4916 case Match_InvalidImm1_32:
4917 return Error(Loc, "immediate must be an integer in range [1, 32].");
4918 case Match_InvalidImm1_64:
4919 return Error(Loc, "immediate must be an integer in range [1, 64].");
4920 case Match_InvalidSVEAddSubImm8:
4921 return Error(Loc, "immediate must be an integer in range [0, 255]"
4922 " with a shift amount of 0");
4923 case Match_InvalidSVEAddSubImm16:
4924 case Match_InvalidSVEAddSubImm32:
4925 case Match_InvalidSVEAddSubImm64:
4926 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4927 "multiple of 256 in range [256, 65280]");
4928 case Match_InvalidSVECpyImm8:
4929 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4930 " with a shift amount of 0");
4931 case Match_InvalidSVECpyImm16:
4932 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4933 "multiple of 256 in range [-32768, 65280]");
4934 case Match_InvalidSVECpyImm32:
4935 case Match_InvalidSVECpyImm64:
4936 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4937 "multiple of 256 in range [-32768, 32512]");
4938 case Match_InvalidIndexRange1_1:
4939 return Error(Loc, "expected lane specifier '[1]'");
4940 case Match_InvalidIndexRange0_15:
4941 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4942 case Match_InvalidIndexRange0_7:
4943 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4944 case Match_InvalidIndexRange0_3:
4945 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4946 case Match_InvalidIndexRange0_1:
4947 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4948 case Match_InvalidSVEIndexRange0_63:
4949 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4950 case Match_InvalidSVEIndexRange0_31:
4951 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4952 case Match_InvalidSVEIndexRange0_15:
4953 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4954 case Match_InvalidSVEIndexRange0_7:
4955 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4956 case Match_InvalidSVEIndexRange0_3:
4957 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4958 case Match_InvalidLabel:
4959 return Error(Loc, "expected label or encodable integer pc offset");
4960 case Match_MRS:
4961 return Error(Loc, "expected readable system register");
4962 case Match_MSR:
4963 case Match_InvalidSVCR:
4964 return Error(Loc, "expected writable system register or pstate");
4965 case Match_InvalidComplexRotationEven:
4966 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4967 case Match_InvalidComplexRotationOdd:
4968 return Error(Loc, "complex rotation must be 90 or 270.");
4969 case Match_MnemonicFail: {
4970 std::string Suggestion = AArch64MnemonicSpellCheck(
4971 ((AArch64Operand &)*Operands[0]).getToken(),
4972 ComputeAvailableFeatures(STI->getFeatureBits()));
4973 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4974 }
4975 case Match_InvalidGPR64shifted8:
4976 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4977 case Match_InvalidGPR64shifted16:
4978 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4979 case Match_InvalidGPR64shifted32:
4980 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4981 case Match_InvalidGPR64shifted64:
4982 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4983 case Match_InvalidGPR64shifted128:
4984 return Error(
4985 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
4986 case Match_InvalidGPR64NoXZRshifted8:
4987 return Error(Loc, "register must be x0..x30 without shift");
4988 case Match_InvalidGPR64NoXZRshifted16:
4989 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4990 case Match_InvalidGPR64NoXZRshifted32:
4991 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4992 case Match_InvalidGPR64NoXZRshifted64:
4993 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4994 case Match_InvalidGPR64NoXZRshifted128:
4995 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
4996 case Match_InvalidZPR32UXTW8:
4997 case Match_InvalidZPR32SXTW8:
4998 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4999 case Match_InvalidZPR32UXTW16:
5000 case Match_InvalidZPR32SXTW16:
5001 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5002 case Match_InvalidZPR32UXTW32:
5003 case Match_InvalidZPR32SXTW32:
5004 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5005 case Match_InvalidZPR32UXTW64:
5006 case Match_InvalidZPR32SXTW64:
5007 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5008 case Match_InvalidZPR64UXTW8:
5009 case Match_InvalidZPR64SXTW8:
5010 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5011 case Match_InvalidZPR64UXTW16:
5012 case Match_InvalidZPR64SXTW16:
5013 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5014 case Match_InvalidZPR64UXTW32:
5015 case Match_InvalidZPR64SXTW32:
5016 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5017 case Match_InvalidZPR64UXTW64:
5018 case Match_InvalidZPR64SXTW64:
5019 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5020 case Match_InvalidZPR32LSL8:
5021 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5022 case Match_InvalidZPR32LSL16:
5023 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5024 case Match_InvalidZPR32LSL32:
5025 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5026 case Match_InvalidZPR32LSL64:
5027 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5028 case Match_InvalidZPR64LSL8:
5029 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5030 case Match_InvalidZPR64LSL16:
5031 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5032 case Match_InvalidZPR64LSL32:
5033 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5034 case Match_InvalidZPR64LSL64:
5035 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5036 case Match_InvalidZPR0:
5037 return Error(Loc, "expected register without element width suffix");
5038 case Match_InvalidZPR8:
5039 case Match_InvalidZPR16:
5040 case Match_InvalidZPR32:
5041 case Match_InvalidZPR64:
5042 case Match_InvalidZPR128:
5043 return Error(Loc, "invalid element width");
5044 case Match_InvalidZPR_3b8:
5045 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
5046 case Match_InvalidZPR_3b16:
5047 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
5048 case Match_InvalidZPR_3b32:
5049 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
5050 case Match_InvalidZPR_4b16:
5051 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
5052 case Match_InvalidZPR_4b32:
5053 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
5054 case Match_InvalidZPR_4b64:
5055 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
5056 case Match_InvalidSVEPattern:
5057 return Error(Loc, "invalid predicate pattern");
5058 case Match_InvalidSVEPredicateAnyReg:
5059 case Match_InvalidSVEPredicateBReg:
5060 case Match_InvalidSVEPredicateHReg:
5061 case Match_InvalidSVEPredicateSReg:
5062 case Match_InvalidSVEPredicateDReg:
5063 return Error(Loc, "invalid predicate register.");
5064 case Match_InvalidSVEPredicate3bAnyReg:
5065 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
5066 case Match_InvalidSVEPredicate3bBReg:
5067 return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
5068 case Match_InvalidSVEPredicate3bHReg:
5069 return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
5070 case Match_InvalidSVEPredicate3bSReg:
5071 return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
5072 case Match_InvalidSVEPredicate3bDReg:
5073 return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
5074 case Match_InvalidSVEExactFPImmOperandHalfOne:
5075 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
5076 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5077 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
5078 case Match_InvalidSVEExactFPImmOperandZeroOne:
5079 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
5080 case Match_InvalidMatrixTileVectorH8:
5081 case Match_InvalidMatrixTileVectorV8:
5082 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
5083 case Match_InvalidMatrixTileVectorH16:
5084 case Match_InvalidMatrixTileVectorV16:
5085 return Error(Loc,
5086 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
5087 case Match_InvalidMatrixTileVectorH32:
5088 case Match_InvalidMatrixTileVectorV32:
5089 return Error(Loc,
5090 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
5091 case Match_InvalidMatrixTileVectorH64:
5092 case Match_InvalidMatrixTileVectorV64:
5093 return Error(Loc,
5094 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
5095 case Match_InvalidMatrixTileVectorH128:
5096 case Match_InvalidMatrixTileVectorV128:
5097 return Error(Loc,
5098 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
5099 case Match_InvalidMatrixTile32:
5100 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
5101 case Match_InvalidMatrixTile64:
5102 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
5103 case Match_InvalidMatrix:
5104 return Error(Loc, "invalid matrix operand, expected za");
5105 default:
5106 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5106)
;
5107 }
5108}
5109
5110static const char *getSubtargetFeatureName(uint64_t Val);
5111
5112bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
5113 OperandVector &Operands,
5114 MCStreamer &Out,
5115 uint64_t &ErrorInfo,
5116 bool MatchingInlineAsm) {
5117 assert(!Operands.empty() && "Unexpect empty operand list!")(static_cast <bool> (!Operands.empty() && "Unexpect empty operand list!"
) ? void (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5117, __extension__ __PRETTY_FUNCTION__))
;
5118 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
5119 assert(Op.isToken() && "Leading operand should always be a mnemonic!")(static_cast <bool> (Op.isToken() && "Leading operand should always be a mnemonic!"
) ? void (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5119, __extension__ __PRETTY_FUNCTION__))
;
5120
5121 StringRef Tok = Op.getToken();
5122 unsigned NumOperands = Operands.size();
5123
5124 if (NumOperands == 4 && Tok == "lsl") {
5125 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5126 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5127 if (Op2.isScalarReg() && Op3.isImm()) {
5128 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5129 if (Op3CE) {
5130 uint64_t Op3Val = Op3CE->getValue();
5131 uint64_t NewOp3Val = 0;
5132 uint64_t NewOp4Val = 0;
5133 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
5134 Op2.getReg())) {
5135 NewOp3Val = (32 - Op3Val) & 0x1f;
5136 NewOp4Val = 31 - Op3Val;
5137 } else {
5138 NewOp3Val = (64 - Op3Val) & 0x3f;
5139 NewOp4Val = 63 - Op3Val;
5140 }
5141
5142 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
5143 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
5144
5145 Operands[0] = AArch64Operand::CreateToken(
5146 "ubfm", false, Op.getStartLoc(), getContext());
5147 Operands.push_back(AArch64Operand::CreateImm(
5148 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
5149 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
5150 Op3.getEndLoc(), getContext());
5151 }
5152 }
5153 } else if (NumOperands == 4 && Tok == "bfc") {
5154 // FIXME: Horrible hack to handle BFC->BFM alias.
5155 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5156 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
5157 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
5158
5159 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
5160 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
5161 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
5162
5163 if (LSBCE && WidthCE) {
5164 uint64_t LSB = LSBCE->getValue();
5165 uint64_t Width = WidthCE->getValue();
5166
5167 uint64_t RegWidth = 0;
5168 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5169 Op1.getReg()))
5170 RegWidth = 64;
5171 else
5172 RegWidth = 32;
5173
5174 if (LSB >= RegWidth)
5175 return Error(LSBOp.getStartLoc(),
5176 "expected integer in range [0, 31]");
5177 if (Width < 1 || Width > RegWidth)
5178 return Error(WidthOp.getStartLoc(),
5179 "expected integer in range [1, 32]");
5180
5181 uint64_t ImmR = 0;
5182 if (RegWidth == 32)
5183 ImmR = (32 - LSB) & 0x1f;
5184 else
5185 ImmR = (64 - LSB) & 0x3f;
5186
5187 uint64_t ImmS = Width - 1;
5188
5189 if (ImmR != 0 && ImmS >= ImmR)
5190 return Error(WidthOp.getStartLoc(),
5191 "requested insert overflows register");
5192
5193 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
5194 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
5195 Operands[0] = AArch64Operand::CreateToken(
5196 "bfm", false, Op.getStartLoc(), getContext());
5197 Operands[2] = AArch64Operand::CreateReg(
5198 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
5199 SMLoc(), SMLoc(), getContext());
5200 Operands[3] = AArch64Operand::CreateImm(
5201 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
5202 Operands.emplace_back(
5203 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
5204 WidthOp.getEndLoc(), getContext()));
5205 }
5206 }
5207 } else if (NumOperands == 5) {
5208 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
5209 // UBFIZ -> UBFM aliases.
5210 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
5211 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5212 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5213 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5214
5215 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5216 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5217 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5218
5219 if (Op3CE && Op4CE) {
5220 uint64_t Op3Val = Op3CE->getValue();
5221 uint64_t Op4Val = Op4CE->getValue();
5222
5223 uint64_t RegWidth = 0;
5224 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5225 Op1.getReg()))
5226 RegWidth = 64;
5227 else
5228 RegWidth = 32;
5229
5230 if (Op3Val >= RegWidth)
5231 return Error(Op3.getStartLoc(),
5232 "expected integer in range [0, 31]");
5233 if (Op4Val < 1 || Op4Val > RegWidth)
5234 return Error(Op4.getStartLoc(),
5235 "expected integer in range [1, 32]");
5236
5237 uint64_t NewOp3Val = 0;
5238 if (RegWidth == 32)
5239 NewOp3Val = (32 - Op3Val) & 0x1f;
5240 else
5241 NewOp3Val = (64 - Op3Val) & 0x3f;
5242
5243 uint64_t NewOp4Val = Op4Val - 1;
5244
5245 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
5246 return Error(Op4.getStartLoc(),
5247 "requested insert overflows register");
5248
5249 const MCExpr *NewOp3 =
5250 MCConstantExpr::create(NewOp3Val, getContext());
5251 const MCExpr *NewOp4 =
5252 MCConstantExpr::create(NewOp4Val, getContext());
5253 Operands[3] = AArch64Operand::CreateImm(
5254 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
5255 Operands[4] = AArch64Operand::CreateImm(
5256 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5257 if (Tok == "bfi")
5258 Operands[0] = AArch64Operand::CreateToken(
5259 "bfm", false, Op.getStartLoc(), getContext());
5260 else if (Tok == "sbfiz")
5261 Operands[0] = AArch64Operand::CreateToken(
5262 "sbfm", false, Op.getStartLoc(), getContext());
5263 else if (Tok == "ubfiz")
5264 Operands[0] = AArch64Operand::CreateToken(
5265 "ubfm", false, Op.getStartLoc(), getContext());
5266 else
5267 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5267)
;
5268 }
5269 }
5270
5271 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
5272 // UBFX -> UBFM aliases.
5273 } else if (NumOperands == 5 &&
5274 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
5275 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5276 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5277 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5278
5279 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5280 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5281 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5282
5283 if (Op3CE && Op4CE) {
5284 uint64_t Op3Val = Op3CE->getValue();
5285 uint64_t Op4Val = Op4CE->getValue();
5286
5287 uint64_t RegWidth = 0;
5288 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5289 Op1.getReg()))
5290 RegWidth = 64;
5291 else
5292 RegWidth = 32;
5293
5294 if (Op3Val >= RegWidth)
5295 return Error(Op3.getStartLoc(),
5296 "expected integer in range [0, 31]");
5297 if (Op4Val < 1 || Op4Val > RegWidth)
5298 return Error(Op4.getStartLoc(),
5299 "expected integer in range [1, 32]");
5300
5301 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
5302
5303 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
5304 return Error(Op4.getStartLoc(),
5305 "requested extract overflows register");
5306
5307 const MCExpr *NewOp4 =
5308 MCConstantExpr::create(NewOp4Val, getContext());
5309 Operands[4] = AArch64Operand::CreateImm(
5310 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5311 if (Tok == "bfxil")
5312 Operands[0] = AArch64Operand::CreateToken(
5313 "bfm", false, Op.getStartLoc(), getContext());
5314 else if (Tok == "sbfx")
5315 Operands[0] = AArch64Operand::CreateToken(
5316 "sbfm", false, Op.getStartLoc(), getContext());
5317 else if (Tok == "ubfx")
5318 Operands[0] = AArch64Operand::CreateToken(
5319 "ubfm", false, Op.getStartLoc(), getContext());
5320 else
5321 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5321)
;
5322 }
5323 }
5324 }
5325 }
5326
5327 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
5328 // instruction for FP registers correctly in some rare circumstances. Convert
5329 // it to a safe instruction and warn (because silently changing someone's
5330 // assembly is rude).
5331 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
5332 NumOperands == 4 && Tok == "movi") {
5333 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5334 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5335 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5336 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
5337 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
5338 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
5339 if (Suffix.lower() == ".2d" &&
5340 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
5341 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
5342 " correctly on this CPU, converting to equivalent movi.16b");
5343 // Switch the suffix to .16b.
5344 unsigned Idx = Op1.isToken() ? 1 : 2;
5345 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
5346 getContext());
5347 }
5348 }
5349 }
5350
5351 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
5352 // InstAlias can't quite handle this since the reg classes aren't
5353 // subclasses.
5354 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
5355 // The source register can be Wn here, but the matcher expects a
5356 // GPR64. Twiddle it here if necessary.
5357 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5358 if (Op.isScalarReg()) {
5359 unsigned Reg = getXRegFromWReg(Op.getReg());
5360 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5361 Op.getStartLoc(), Op.getEndLoc(),
5362 getContext());
5363 }
5364 }
5365 // FIXME: Likewise for sxt[bh] with a Xd dst operand
5366 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5367 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5368 if (Op.isScalarReg() &&
5369 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5370 Op.getReg())) {
5371 // The source register can be Wn here, but the matcher expects a
5372 // GPR64. Twiddle it here if necessary.
5373 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5374 if (Op.isScalarReg()) {
5375 unsigned Reg = getXRegFromWReg(Op.getReg());
5376 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5377 Op.getStartLoc(),
5378 Op.getEndLoc(), getContext());
5379 }
5380 }
5381 }
5382 // FIXME: Likewise for uxt[bh] with a Xd dst operand
5383 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5384 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5385 if (Op.isScalarReg() &&
5386 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5387 Op.getReg())) {
5388 // The source register can be Wn here, but the matcher expects a
5389 // GPR32. Twiddle it here if necessary.
5390 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5391 if (Op.isScalarReg()) {
5392 unsigned Reg = getWRegFromXReg(Op.getReg());
5393 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5394 Op.getStartLoc(),
5395 Op.getEndLoc(), getContext());
5396 }
5397 }
5398 }
5399
5400 MCInst Inst;
5401 FeatureBitset MissingFeatures;
5402 // First try to match against the secondary set of tables containing the
5403 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5404 unsigned MatchResult =
5405 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5406 MatchingInlineAsm, 1);
5407
5408 // If that fails, try against the alternate table containing long-form NEON:
5409 // "fadd v0.2s, v1.2s, v2.2s"
5410 if (MatchResult != Match_Success) {
5411 // But first, save the short-form match result: we can use it in case the
5412 // long-form match also fails.
5413 auto ShortFormNEONErrorInfo = ErrorInfo;
5414 auto ShortFormNEONMatchResult = MatchResult;
5415 auto ShortFormNEONMissingFeatures = MissingFeatures;
5416
5417 MatchResult =
5418 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5419 MatchingInlineAsm, 0);
5420
5421 // Now, both matches failed, and the long-form match failed on the mnemonic
5422 // suffix token operand. The short-form match failure is probably more
5423 // relevant: use it instead.
5424 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5425 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5426 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5427 MatchResult = ShortFormNEONMatchResult;
5428 ErrorInfo = ShortFormNEONErrorInfo;
5429 MissingFeatures = ShortFormNEONMissingFeatures;
5430 }
5431 }
5432
5433 switch (MatchResult) {
5434 case Match_Success: {
5435 // Perform range checking and other semantic validations
5436 SmallVector<SMLoc, 8> OperandLocs;
5437 NumOperands = Operands.size();
5438 for (unsigned i = 1; i < NumOperands; ++i)
5439 OperandLocs.push_back(Operands[i]->getStartLoc());
5440 if (validateInstruction(Inst, IDLoc, OperandLocs))
5441 return true;
5442
5443 Inst.setLoc(IDLoc);
5444 Out.emitInstruction(Inst, getSTI());
5445 return false;
5446 }
5447 case Match_MissingFeature: {
5448 assert(MissingFeatures.any() && "Unknown missing feature!")(static_cast <bool> (MissingFeatures.any() && "Unknown missing feature!"
) ? void (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5448, __extension__ __PRETTY_FUNCTION__))
;
5449 // Special case the error message for the very common case where only
5450 // a single subtarget feature is missing (neon, e.g.).
5451 std::string Msg = "instruction requires:";
5452 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5453 if (MissingFeatures[i]) {
5454 Msg += " ";
5455 Msg += getSubtargetFeatureName(i);
5456 }
5457 }
5458 return Error(IDLoc, Msg);
5459 }
5460 case Match_MnemonicFail:
5461 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5462 case Match_InvalidOperand: {
5463 SMLoc ErrorLoc = IDLoc;
5464
5465 if (ErrorInfo != ~0ULL) {
5466 if (ErrorInfo >= Operands.size())
5467 return Error(IDLoc, "too few operands for instruction",
5468 SMRange(IDLoc, getTok().getLoc()));
5469
5470 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5471 if (ErrorLoc == SMLoc())
5472 ErrorLoc = IDLoc;
5473 }
5474 // If the match failed on a suffix token operand, tweak the diagnostic
5475 // accordingly.
5476 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5477 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5478 MatchResult = Match_InvalidSuffix;
5479
5480 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5481 }
5482 case Match_InvalidTiedOperand:
5483 case Match_InvalidMemoryIndexed1:
5484 case Match_InvalidMemoryIndexed2:
5485 case Match_InvalidMemoryIndexed4:
5486 case Match_InvalidMemoryIndexed8:
5487 case Match_InvalidMemoryIndexed16:
5488 case Match_InvalidCondCode:
5489 case Match_AddSubRegExtendSmall:
5490 case Match_AddSubRegExtendLarge:
5491 case Match_AddSubSecondSource:
5492 case Match_LogicalSecondSource:
5493 case Match_AddSubRegShift32:
5494 case Match_AddSubRegShift64:
5495 case Match_InvalidMovImm32Shift:
5496 case Match_InvalidMovImm64Shift:
5497 case Match_InvalidFPImm:
5498 case Match_InvalidMemoryWExtend8:
5499 case Match_InvalidMemoryWExtend16:
5500 case Match_InvalidMemoryWExtend32:
5501 case Match_InvalidMemoryWExtend64:
5502 case Match_InvalidMemoryWExtend128:
5503 case Match_InvalidMemoryXExtend8:
5504 case Match_InvalidMemoryXExtend16:
5505 case Match_InvalidMemoryXExtend32:
5506 case Match_InvalidMemoryXExtend64:
5507 case Match_InvalidMemoryXExtend128:
5508 case Match_InvalidMemoryIndexed1SImm4:
5509 case Match_InvalidMemoryIndexed2SImm4:
5510 case Match_InvalidMemoryIndexed3SImm4:
5511 case Match_InvalidMemoryIndexed4SImm4:
5512 case Match_InvalidMemoryIndexed1SImm6:
5513 case Match_InvalidMemoryIndexed16SImm4:
5514 case Match_InvalidMemoryIndexed32SImm4:
5515 case Match_InvalidMemoryIndexed4SImm7:
5516 case Match_InvalidMemoryIndexed8SImm7:
5517 case Match_InvalidMemoryIndexed16SImm7:
5518 case Match_InvalidMemoryIndexed8UImm5:
5519 case Match_InvalidMemoryIndexed4UImm5:
5520 case Match_InvalidMemoryIndexed2UImm5:
5521 case Match_InvalidMemoryIndexed1UImm6:
5522 case Match_InvalidMemoryIndexed2UImm6:
5523 case Match_InvalidMemoryIndexed4UImm6:
5524 case Match_InvalidMemoryIndexed8UImm6:
5525 case Match_InvalidMemoryIndexed16UImm6:
5526 case Match_InvalidMemoryIndexedSImm6:
5527 case Match_InvalidMemoryIndexedSImm5:
5528 case Match_InvalidMemoryIndexedSImm8:
5529 case Match_InvalidMemoryIndexedSImm9:
5530 case Match_InvalidMemoryIndexed16SImm9:
5531 case Match_InvalidMemoryIndexed8SImm10:
5532 case Match_InvalidImm0_1:
5533 case Match_InvalidImm0_3:
5534 case Match_InvalidImm0_7:
5535 case Match_InvalidImm0_15:
5536 case Match_InvalidImm0_31:
5537 case Match_InvalidImm0_63:
5538 case Match_InvalidImm0_127:
5539 case Match_InvalidImm0_255:
5540 case Match_InvalidImm0_65535:
5541 case Match_InvalidImm1_8:
5542 case Match_InvalidImm1_16:
5543 case Match_InvalidImm1_32:
5544 case Match_InvalidImm1_64:
5545 case Match_InvalidSVEAddSubImm8:
5546 case Match_InvalidSVEAddSubImm16:
5547 case Match_InvalidSVEAddSubImm32:
5548 case Match_InvalidSVEAddSubImm64:
5549 case Match_InvalidSVECpyImm8:
5550 case Match_InvalidSVECpyImm16:
5551 case Match_InvalidSVECpyImm32:
5552 case Match_InvalidSVECpyImm64:
5553 case Match_InvalidIndexRange1_1:
5554 case Match_InvalidIndexRange0_15:
5555 case Match_InvalidIndexRange0_7:
5556 case Match_InvalidIndexRange0_3:
5557 case Match_InvalidIndexRange0_1:
5558 case Match_InvalidSVEIndexRange0_63:
5559 case Match_InvalidSVEIndexRange0_31:
5560 case Match_InvalidSVEIndexRange0_15:
5561 case Match_InvalidSVEIndexRange0_7:
5562 case Match_InvalidSVEIndexRange0_3:
5563 case Match_InvalidLabel:
5564 case Match_InvalidComplexRotationEven:
5565 case Match_InvalidComplexRotationOdd:
5566 case Match_InvalidGPR64shifted8:
5567 case Match_InvalidGPR64shifted16:
5568 case Match_InvalidGPR64shifted32:
5569 case Match_InvalidGPR64shifted64:
5570 case Match_InvalidGPR64shifted128:
5571 case Match_InvalidGPR64NoXZRshifted8:
5572 case Match_InvalidGPR64NoXZRshifted16:
5573 case Match_InvalidGPR64NoXZRshifted32:
5574 case Match_InvalidGPR64NoXZRshifted64:
5575 case Match_InvalidGPR64NoXZRshifted128:
5576 case Match_InvalidZPR32UXTW8:
5577 case Match_InvalidZPR32UXTW16:
5578 case Match_InvalidZPR32UXTW32:
5579 case Match_InvalidZPR32UXTW64:
5580 case Match_InvalidZPR32SXTW8:
5581 case Match_InvalidZPR32SXTW16:
5582 case Match_InvalidZPR32SXTW32:
5583 case Match_InvalidZPR32SXTW64:
5584 case Match_InvalidZPR64UXTW8:
5585 case Match_InvalidZPR64SXTW8:
5586 case Match_InvalidZPR64UXTW16:
5587 case Match_InvalidZPR64SXTW16:
5588 case Match_InvalidZPR64UXTW32:
5589 case Match_InvalidZPR64SXTW32:
5590 case Match_InvalidZPR64UXTW64:
5591 case Match_InvalidZPR64SXTW64:
5592 case Match_InvalidZPR32LSL8:
5593 case Match_InvalidZPR32LSL16:
5594 case Match_InvalidZPR32LSL32:
5595 case Match_InvalidZPR32LSL64:
5596 case Match_InvalidZPR64LSL8:
5597 case Match_InvalidZPR64LSL16:
5598 case Match_InvalidZPR64LSL32:
5599 case Match_InvalidZPR64LSL64:
5600 case Match_InvalidZPR0:
5601 case Match_InvalidZPR8:
5602 case Match_InvalidZPR16:
5603 case Match_InvalidZPR32:
5604 case Match_InvalidZPR64:
5605 case Match_InvalidZPR128:
5606 case Match_InvalidZPR_3b8:
5607 case Match_InvalidZPR_3b16:
5608 case Match_InvalidZPR_3b32:
5609 case Match_InvalidZPR_4b16:
5610 case Match_InvalidZPR_4b32:
5611 case Match_InvalidZPR_4b64:
5612 case Match_InvalidSVEPredicateAnyReg:
5613 case Match_InvalidSVEPattern:
5614 case Match_InvalidSVEPredicateBReg:
5615 case Match_InvalidSVEPredicateHReg:
5616 case Match_InvalidSVEPredicateSReg:
5617 case Match_InvalidSVEPredicateDReg:
5618 case Match_InvalidSVEPredicate3bAnyReg:
5619 case Match_InvalidSVEPredicate3bBReg:
5620 case Match_InvalidSVEPredicate3bHReg:
5621 case Match_InvalidSVEPredicate3bSReg:
5622 case Match_InvalidSVEPredicate3bDReg:
5623 case Match_InvalidSVEExactFPImmOperandHalfOne:
5624 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5625 case Match_InvalidSVEExactFPImmOperandZeroOne:
5626 case Match_InvalidMatrixTile32:
5627 case Match_InvalidMatrixTile64:
5628 case Match_InvalidMatrix:
5629 case Match_InvalidMatrixTileVectorH8:
5630 case Match_InvalidMatrixTileVectorH16:
5631 case Match_InvalidMatrixTileVectorH32:
5632 case Match_InvalidMatrixTileVectorH64:
5633 case Match_InvalidMatrixTileVectorH128:
5634 case Match_InvalidMatrixTileVectorV8:
5635 case Match_InvalidMatrixTileVectorV16:
5636 case Match_InvalidMatrixTileVectorV32:
5637 case Match_InvalidMatrixTileVectorV64:
5638 case Match_InvalidMatrixTileVectorV128:
5639 case Match_InvalidSVCR:
5640 case Match_MSR:
5641 case Match_MRS: {
5642 if (ErrorInfo >= Operands.size())
5643 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5644 // Any time we get here, there's nothing fancy to do. Just get the
5645 // operand SMLoc and display the diagnostic.
5646 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5647 if (ErrorLoc == SMLoc())
5648 ErrorLoc = IDLoc;
5649 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5650 }
5651 }
5652
5653 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5653)
;
5654}
5655
5656/// ParseDirective parses the arm specific directives
5657bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5658 const MCContext::Environment Format = getContext().getObjectFileType();
5659 bool IsMachO = Format == MCContext::IsMachO;
1
Assuming 'Format' is not equal to IsMachO
5660 bool IsCOFF = Format == MCContext::IsCOFF;
2
Assuming 'Format' is equal to IsCOFF
5661
5662 auto IDVal = DirectiveID.getIdentifier().lower();
5663 SMLoc Loc = DirectiveID.getLoc();
5664 if (IDVal == ".arch")
3
Taking false branch
5665 parseDirectiveArch(Loc);
5666 else if (IDVal == ".cpu")
4
Taking false branch
5667 parseDirectiveCPU(Loc);
5668 else if (IDVal == ".tlsdesccall")
5
Taking false branch
5669 parseDirectiveTLSDescCall(Loc);
5670 else if (IDVal == ".ltorg" || IDVal == ".pool")
6
Taking false branch
5671 parseDirectiveLtorg(Loc);
5672 else if (IDVal == ".unreq")
7
Taking false branch
5673 parseDirectiveUnreq(Loc);
5674 else if (IDVal == ".inst")
8
Taking false branch
5675 parseDirectiveInst(Loc);
5676 else if (IDVal == ".cfi_negate_ra_state")
9
Taking false branch
5677 parseDirectiveCFINegateRAState();
5678 else if (IDVal == ".cfi_b_key_frame")
10
Taking false branch
5679 parseDirectiveCFIBKeyFrame();
5680 else if (IDVal == ".arch_extension")
11
Taking false branch
5681 parseDirectiveArchExtension(Loc);
5682 else if (IDVal == ".variant_pcs")
12
Taking false branch
5683 parseDirectiveVariantPCS(Loc);
5684 else if (IsMachO
12.1
'IsMachO' is false
) {
13
Taking false branch
5685 if (IDVal == MCLOHDirectiveName())
5686 parseDirectiveLOH(IDVal, Loc);
5687 else
5688 return true;
5689 } else if (IsCOFF
13.1
'IsCOFF' is true
) {
14
Taking true branch
5690 if (IDVal == ".seh_stackalloc")
15
Taking false branch
5691 parseDirectiveSEHAllocStack(Loc);
5692 else if (IDVal == ".seh_endprologue")
16
Taking false branch
5693 parseDirectiveSEHPrologEnd(Loc);
5694 else if (IDVal == ".seh_save_r19r20_x")
17
Taking false branch
5695 parseDirectiveSEHSaveR19R20X(Loc);
5696 else if (IDVal == ".seh_save_fplr")
18
Taking false branch
5697 parseDirectiveSEHSaveFPLR(Loc);
5698 else if (IDVal == ".seh_save_fplr_x")
19
Taking false branch
5699 parseDirectiveSEHSaveFPLRX(Loc);
5700 else if (IDVal == ".seh_save_reg")
20
Taking true branch
5701 parseDirectiveSEHSaveReg(Loc);
21
Calling 'AArch64AsmParser::parseDirectiveSEHSaveReg'
5702 else if (IDVal == ".seh_save_reg_x")
5703 parseDirectiveSEHSaveRegX(Loc);
5704 else if (IDVal == ".seh_save_regp")
5705 parseDirectiveSEHSaveRegP(Loc);
5706 else if (IDVal == ".seh_save_regp_x")
5707 parseDirectiveSEHSaveRegPX(Loc);
5708 else if (IDVal == ".seh_save_lrpair")
5709 parseDirectiveSEHSaveLRPair(Loc);
5710 else if (IDVal == ".seh_save_freg")
5711 parseDirectiveSEHSaveFReg(Loc);
5712 else if (IDVal == ".seh_save_freg_x")
5713 parseDirectiveSEHSaveFRegX(Loc);
5714 else if (IDVal == ".seh_save_fregp")
5715 parseDirectiveSEHSaveFRegP(Loc);
5716 else if (IDVal == ".seh_save_fregp_x")
5717 parseDirectiveSEHSaveFRegPX(Loc);
5718 else if (IDVal == ".seh_set_fp")
5719 parseDirectiveSEHSetFP(Loc);