Bug Summary

File: build/source/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 7652, column 22
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AArch64/AsmParser -I /build/source/llvm/lib/Target/AArch64/AsmParser -I /build/source/llvm/lib/Target/AArch64 -I lib/Target/AArch64 -I include -I /build/source/llvm/include -I lib/Target/AArch64/AsmParser/.. -I /build/source/llvm/lib/Target/AArch64/AsmParser/.. 
-D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1668078801 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-11-10-135928-647445-1 -x c++ /build/source/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCExpr.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCLinkerOptimizationHint.h"
32#include "llvm/MC/MCObjectFileInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37#include "llvm/MC/MCParser/MCTargetAsmParser.h"
38#include "llvm/MC/MCRegisterInfo.h"
39#include "llvm/MC/MCStreamer.h"
40#include "llvm/MC/MCSubtargetInfo.h"
41#include "llvm/MC/MCSymbol.h"
42#include "llvm/MC/MCTargetOptions.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/MC/SubtargetFeature.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/Casting.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/AArch64TargetParser.h"
52#include "llvm/Support/TargetParser.h"
53#include "llvm/Support/raw_ostream.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
/// The class of register an assembly operand may name.
enum class RegKind {
  Scalar,                 // General-purpose or FP/SIMD scalar register.
  NeonVector,             // NEON vector register (e.g. v0.8b).
  SVEDataVector,          // SVE Z data vector register.
  SVEPredicateAsCounter,  // SVE predicate register used as a counter (PN).
  SVEPredicateVector,     // SVE P predicate vector register.
  Matrix,                 // SME matrix (ZA) register.
  LookupTable             // SME2 lookup-table (ZT) register.
};
76
/// How an SME matrix register operand is addressed.
enum class MatrixKind { Array, Tile, Row, Col };
78
/// How a parsed register must relate to the register class the
/// instruction expects (exact match, or its super-/sub-register).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
84
85class AArch64AsmParser : public MCTargetAsmParser {
86private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
89 // Map of register aliases registers via the .req directive.
90 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
91
92 class PrefixInfo {
93 public:
94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 109
, __extension__ __PRETTY_FUNCTION__))
109 "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 109
, __extension__ __PRETTY_FUNCTION__))
;
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 121
, __extension__ __PRETTY_FUNCTION__))
121 "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone
&& "No destructive element size set for movprfx") ? void
(0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 121
, __extension__ __PRETTY_FUNCTION__))
;
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
133 bool isActive() const { return Active; }
134 bool isPredicated() const { return Predicated; }
135 unsigned getElementSize() const {
136 assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail
("Predicated", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 136, __extension__ __PRETTY_FUNCTION__))
;
137 return ElementSize;
138 }
139 unsigned getDstReg() const { return Dst; }
140 unsigned getPgReg() const {
141 assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail
("Predicated", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 141, __extension__ __PRETTY_FUNCTION__))
;
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
148 unsigned ElementSize;
149 unsigned Dst;
150 unsigned Pg;
151 } NextPrefix;
152
153 AArch64TargetStreamer &getTargetStreamer() {
154 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
162 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
163 std::string &Suggestion);
164 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
165 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
166 bool parseRegister(OperandVector &Operands);
167 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
168 bool parseNeonVectorList(OperandVector &Operands);
169 bool parseOptionalMulOperand(OperandVector &Operands);
170 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
171 bool parseKeywordOperand(OperandVector &Operands);
172 bool parseOperand(OperandVector &Operands, bool isCondCode,
173 bool invertCondCode);
174 bool parseImmExpr(int64_t &Out);
175 bool parseComma();
176 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
177 unsigned Last);
178
179 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
180 OperandVector &Operands);
181
182 bool parseDirectiveArch(SMLoc L);
183 bool parseDirectiveArchExtension(SMLoc L);
184 bool parseDirectiveCPU(SMLoc L);
185 bool parseDirectiveInst(SMLoc L);
186
187 bool parseDirectiveTLSDescCall(SMLoc L);
188
189 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
190 bool parseDirectiveLtorg(SMLoc L);
191
192 bool parseDirectiveReq(StringRef Name, SMLoc L);
193 bool parseDirectiveUnreq(SMLoc L);
194 bool parseDirectiveCFINegateRAState();
195 bool parseDirectiveCFIBKeyFrame();
196 bool parseDirectiveCFIMTETaggedFrame();
197
198 bool parseDirectiveVariantPCS(SMLoc L);
199
200 bool parseDirectiveSEHAllocStack(SMLoc L);
201 bool parseDirectiveSEHPrologEnd(SMLoc L);
202 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
203 bool parseDirectiveSEHSaveFPLR(SMLoc L);
204 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
205 bool parseDirectiveSEHSaveReg(SMLoc L);
206 bool parseDirectiveSEHSaveRegX(SMLoc L);
207 bool parseDirectiveSEHSaveRegP(SMLoc L);
208 bool parseDirectiveSEHSaveRegPX(SMLoc L);
209 bool parseDirectiveSEHSaveLRPair(SMLoc L);
210 bool parseDirectiveSEHSaveFReg(SMLoc L);
211 bool parseDirectiveSEHSaveFRegX(SMLoc L);
212 bool parseDirectiveSEHSaveFRegP(SMLoc L);
213 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
214 bool parseDirectiveSEHSetFP(SMLoc L);
215 bool parseDirectiveSEHAddFP(SMLoc L);
216 bool parseDirectiveSEHNop(SMLoc L);
217 bool parseDirectiveSEHSaveNext(SMLoc L);
218 bool parseDirectiveSEHEpilogStart(SMLoc L);
219 bool parseDirectiveSEHEpilogEnd(SMLoc L);
220 bool parseDirectiveSEHTrapFrame(SMLoc L);
221 bool parseDirectiveSEHMachineFrame(SMLoc L);
222 bool parseDirectiveSEHContext(SMLoc L);
223 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
224 bool parseDirectiveSEHPACSignLR(SMLoc L);
225 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
226
227 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
228 SmallVectorImpl<SMLoc> &Loc);
229 unsigned getNumRegsForRegKind(RegKind K);
230 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
231 OperandVector &Operands, MCStreamer &Out,
232 uint64_t &ErrorInfo,
233 bool MatchingInlineAsm) override;
234/// @name Auto-generated Match Functions
235/// {
236
237#define GET_ASSEMBLER_HEADER
238#include "AArch64GenAsmMatcher.inc"
239
240 /// }
241
242 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
243 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
244 RegKind MatchKind);
245 OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
246 OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
247 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
248 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
249 OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
250 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
251 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
252 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
253 template <bool IsSVEPrefetch = false>
254 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
255 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
256 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
257 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
258 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
259 template<bool AddFPZeroAsLiteral>
260 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
261 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
262 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
263 bool tryParseNeonVectorRegister(OperandVector &Operands);
264 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
265 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
266 template <bool ParseShiftExtend,
267 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
268 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
269 OperandMatchResultTy tryParseZTOperand(OperandVector &Operands);
270 template <bool ParseShiftExtend, bool ParseSuffix>
271 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
272 template <RegKind RK>
273 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
274 template <RegKind VectorKind>
275 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
276 bool ExpectMatch = false);
277 OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
278 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
279 OperandMatchResultTy tryParseSVEVecLenSpecifier(OperandVector &Operands);
280 OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
281 OperandMatchResultTy tryParseImmRange(OperandVector &Operands);
282
283public:
284 enum AArch64MatchResultTy {
285 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
286#define GET_OPERAND_DIAGNOSTIC_TYPES
287#include "AArch64GenAsmMatcher.inc"
288 };
289 bool IsILP32;
290
291 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
292 const MCInstrInfo &MII, const MCTargetOptions &Options)
293 : MCTargetAsmParser(Options, STI, MII) {
294 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
295 MCAsmParserExtension::Initialize(Parser);
296 MCStreamer &S = getParser().getStreamer();
297 if (S.getTargetStreamer() == nullptr)
298 new AArch64TargetStreamer(S);
299
300 // Alias .hword/.word/.[dx]word to the target-independent
301 // .2byte/.4byte/.8byte directives as they have the same form and
302 // semantics:
303 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
304 Parser.addAliasForDirective(".hword", ".2byte");
305 Parser.addAliasForDirective(".word", ".4byte");
306 Parser.addAliasForDirective(".dword", ".8byte");
307 Parser.addAliasForDirective(".xword", ".8byte");
308
309 // Initialize the set of available features.
310 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
311 }
312
313 bool areEqualRegs(const MCParsedAsmOperand &Op1,
314 const MCParsedAsmOperand &Op2) const override;
315 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
316 SMLoc NameLoc, OperandVector &Operands) override;
317 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
318 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
319 SMLoc &EndLoc) override;
320 bool ParseDirective(AsmToken DirectiveID) override;
321 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
322 unsigned Kind) override;
323
324 static bool classifySymbolRef(const MCExpr *Expr,
325 AArch64MCExpr::VariantKind &ELFRefKind,
326 MCSymbolRefExpr::VariantKind &DarwinRefKind,
327 int64_t &Addend);
328};
329
330/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
331/// instruction.
332class AArch64Operand : public MCParsedAsmOperand {
333private:
334 enum KindTy {
335 k_Immediate,
336 k_ShiftedImm,
337 k_ImmRange,
338 k_CondCode,
339 k_Register,
340 k_MatrixRegister,
341 k_MatrixTileList,
342 k_SVCR,
343 k_VectorList,
344 k_VectorIndex,
345 k_Token,
346 k_SysReg,
347 k_SysCR,
348 k_Prefetch,
349 k_ShiftExtend,
350 k_FPImm,
351 k_Barrier,
352 k_PSBHint,
353 k_BTIHint,
354 } Kind;
355
356 SMLoc StartLoc, EndLoc;
357
358 struct TokOp {
359 const char *Data;
360 unsigned Length;
361 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
362 };
363
364 // Separate shift/extend operand.
365 struct ShiftExtendOp {
366 AArch64_AM::ShiftExtendType Type;
367 unsigned Amount;
368 bool HasExplicitAmount;
369 };
370
371 struct RegOp {
372 unsigned RegNum;
373 RegKind Kind;
374 int ElementWidth;
375
376 // The register may be allowed as a different register class,
377 // e.g. for GPR64as32 or GPR32as64.
378 RegConstraintEqualityTy EqualityTy;
379
380 // In some cases the shift/extend needs to be explicitly parsed together
381 // with the register, rather than as a separate operand. This is needed
382 // for addressing modes where the instruction as a whole dictates the
383 // scaling/extend, rather than specific bits in the instruction.
384 // By parsing them as a single operand, we avoid the need to pass an
385 // extra operand in all CodeGen patterns (because all operands need to
386 // have an associated value), and we avoid the need to update TableGen to
387 // accept operands that have no associated bits in the instruction.
388 //
389 // An added benefit of parsing them together is that the assembler
390 // can give a sensible diagnostic if the scaling is not correct.
391 //
392 // The default is 'lsl #0' (HasExplicitAmount = false) if no
393 // ShiftExtend is specified.
394 ShiftExtendOp ShiftExtend;
395 };
396
397 struct MatrixRegOp {
398 unsigned RegNum;
399 unsigned ElementWidth;
400 MatrixKind Kind;
401 };
402
403 struct MatrixTileListOp {
404 unsigned RegMask = 0;
405 };
406
407 struct VectorListOp {
408 unsigned RegNum;
409 unsigned Count;
410 unsigned NumElements;
411 unsigned ElementWidth;
412 RegKind RegisterKind;
413 };
414
415 struct VectorIndexOp {
416 int Val;
417 };
418
419 struct ImmOp {
420 const MCExpr *Val;
421 };
422
423 struct ShiftedImmOp {
424 const MCExpr *Val;
425 unsigned ShiftAmount;
426 };
427
428 struct ImmRangeOp {
429 unsigned First;
430 unsigned Last;
431 };
432
433 struct CondCodeOp {
434 AArch64CC::CondCode Code;
435 };
436
437 struct FPImmOp {
438 uint64_t Val; // APFloat value bitcasted to uint64_t.
439 bool IsExact; // describes whether parsed value was exact.
440 };
441
442 struct BarrierOp {
443 const char *Data;
444 unsigned Length;
445 unsigned Val; // Not the enum since not all values have names.
446 bool HasnXSModifier;
447 };
448
449 struct SysRegOp {
450 const char *Data;
451 unsigned Length;
452 uint32_t MRSReg;
453 uint32_t MSRReg;
454 uint32_t PStateField;
455 };
456
457 struct SysCRImmOp {
458 unsigned Val;
459 };
460
461 struct PrefetchOp {
462 const char *Data;
463 unsigned Length;
464 unsigned Val;
465 };
466
467 struct PSBHintOp {
468 const char *Data;
469 unsigned Length;
470 unsigned Val;
471 };
472
473 struct BTIHintOp {
474 const char *Data;
475 unsigned Length;
476 unsigned Val;
477 };
478
479 struct SVCROp {
480 const char *Data;
481 unsigned Length;
482 unsigned PStateField;
483 };
484
485 union {
486 struct TokOp Tok;
487 struct RegOp Reg;
488 struct MatrixRegOp MatrixReg;
489 struct MatrixTileListOp MatrixTileList;
490 struct VectorListOp VectorList;
491 struct VectorIndexOp VectorIndex;
492 struct ImmOp Imm;
493 struct ShiftedImmOp ShiftedImm;
494 struct ImmRangeOp ImmRange;
495 struct CondCodeOp CondCode;
496 struct FPImmOp FPImm;
497 struct BarrierOp Barrier;
498 struct SysRegOp SysReg;
499 struct SysCRImmOp SysCRImm;
500 struct PrefetchOp Prefetch;
501 struct PSBHintOp PSBHint;
502 struct BTIHintOp BTIHint;
503 struct ShiftExtendOp ShiftExtend;
504 struct SVCROp SVCR;
505 };
506
507 // Keep the MCContext around as the MCExprs may need manipulated during
508 // the add<>Operands() calls.
509 MCContext &Ctx;
510
511public:
512 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
513
514 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
515 Kind = o.Kind;
516 StartLoc = o.StartLoc;
517 EndLoc = o.EndLoc;
518 switch (Kind) {
519 case k_Token:
520 Tok = o.Tok;
521 break;
522 case k_Immediate:
523 Imm = o.Imm;
524 break;
525 case k_ShiftedImm:
526 ShiftedImm = o.ShiftedImm;
527 break;
528 case k_ImmRange:
529 ImmRange = o.ImmRange;
530 break;
531 case k_CondCode:
532 CondCode = o.CondCode;
533 break;
534 case k_FPImm:
535 FPImm = o.FPImm;
536 break;
537 case k_Barrier:
538 Barrier = o.Barrier;
539 break;
540 case k_Register:
541 Reg = o.Reg;
542 break;
543 case k_MatrixRegister:
544 MatrixReg = o.MatrixReg;
545 break;
546 case k_MatrixTileList:
547 MatrixTileList = o.MatrixTileList;
548 break;
549 case k_VectorList:
550 VectorList = o.VectorList;
551 break;
552 case k_VectorIndex:
553 VectorIndex = o.VectorIndex;
554 break;
555 case k_SysReg:
556 SysReg = o.SysReg;
557 break;
558 case k_SysCR:
559 SysCRImm = o.SysCRImm;
560 break;
561 case k_Prefetch:
562 Prefetch = o.Prefetch;
563 break;
564 case k_PSBHint:
565 PSBHint = o.PSBHint;
566 break;
567 case k_BTIHint:
568 BTIHint = o.BTIHint;
569 break;
570 case k_ShiftExtend:
571 ShiftExtend = o.ShiftExtend;
572 break;
573 case k_SVCR:
574 SVCR = o.SVCR;
575 break;
576 }
577 }
578
579 /// getStartLoc - Get the location of the first token of this operand.
580 SMLoc getStartLoc() const override { return StartLoc; }
581 /// getEndLoc - Get the location of the last token of this operand.
582 SMLoc getEndLoc() const override { return EndLoc; }
583
584 StringRef getToken() const {
585 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 585
, __extension__ __PRETTY_FUNCTION__))
;
586 return StringRef(Tok.Data, Tok.Length);
587 }
588
589 bool isTokenSuffix() const {
590 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 590
, __extension__ __PRETTY_FUNCTION__))
;
591 return Tok.IsSuffix;
592 }
593
594 const MCExpr *getImm() const {
595 assert(Kind == k_Immediate && "Invalid access!")(static_cast <bool> (Kind == k_Immediate && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 595
, __extension__ __PRETTY_FUNCTION__))
;
596 return Imm.Val;
597 }
598
599 const MCExpr *getShiftedImmVal() const {
600 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 600
, __extension__ __PRETTY_FUNCTION__))
;
601 return ShiftedImm.Val;
602 }
603
604 unsigned getShiftedImmShift() const {
605 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 605
, __extension__ __PRETTY_FUNCTION__))
;
606 return ShiftedImm.ShiftAmount;
607 }
608
609 unsigned getFirstImmVal() const {
610 assert(Kind == k_ImmRange && "Invalid access!")(static_cast <bool> (Kind == k_ImmRange && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ImmRange && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 610
, __extension__ __PRETTY_FUNCTION__))
;
611 return ImmRange.First;
612 }
613
614 unsigned getLastImmVal() const {
615 assert(Kind == k_ImmRange && "Invalid access!")(static_cast <bool> (Kind == k_ImmRange && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ImmRange && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 615
, __extension__ __PRETTY_FUNCTION__))
;
616 return ImmRange.Last;
617 }
618
619 AArch64CC::CondCode getCondCode() const {
620 assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 620
, __extension__ __PRETTY_FUNCTION__))
;
621 return CondCode.Code;
622 }
623
624 APFloat getFPImm() const {
625 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 625
, __extension__ __PRETTY_FUNCTION__))
;
626 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
627 }
628
629 bool getFPImmIsExact() const {
630 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 630
, __extension__ __PRETTY_FUNCTION__))
;
631 return FPImm.IsExact;
632 }
633
634 unsigned getBarrier() const {
635 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 635
, __extension__ __PRETTY_FUNCTION__))
;
636 return Barrier.Val;
637 }
638
639 StringRef getBarrierName() const {
640 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 640
, __extension__ __PRETTY_FUNCTION__))
;
641 return StringRef(Barrier.Data, Barrier.Length);
642 }
643
644 bool getBarriernXSModifier() const {
645 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 645
, __extension__ __PRETTY_FUNCTION__))
;
646 return Barrier.HasnXSModifier;
647 }
648
649 unsigned getReg() const override {
650 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 650
, __extension__ __PRETTY_FUNCTION__))
;
651 return Reg.RegNum;
652 }
653
654 unsigned getMatrixReg() const {
655 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 655
, __extension__ __PRETTY_FUNCTION__))
;
656 return MatrixReg.RegNum;
657 }
658
659 unsigned getMatrixElementWidth() const {
660 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 660
, __extension__ __PRETTY_FUNCTION__))
;
661 return MatrixReg.ElementWidth;
662 }
663
664 MatrixKind getMatrixKind() const {
665 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 665
, __extension__ __PRETTY_FUNCTION__))
;
666 return MatrixReg.Kind;
667 }
668
669 unsigned getMatrixTileListRegMask() const {
670 assert(isMatrixTileList() && "Invalid access!")(static_cast <bool> (isMatrixTileList() && "Invalid access!"
) ? void (0) : __assert_fail ("isMatrixTileList() && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 670
, __extension__ __PRETTY_FUNCTION__))
;
671 return MatrixTileList.RegMask;
672 }
673
674 RegConstraintEqualityTy getRegEqualityTy() const {
675 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 675
, __extension__ __PRETTY_FUNCTION__))
;
676 return Reg.EqualityTy;
677 }
678
679 unsigned getVectorListStart() const {
680 assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 680
, __extension__ __PRETTY_FUNCTION__))
;
681 return VectorList.RegNum;
682 }
683
684 unsigned getVectorListCount() const {
685 assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 685
, __extension__ __PRETTY_FUNCTION__))
;
686 return VectorList.Count;
687 }
688
689 int getVectorIndex() const {
690 assert(Kind == k_VectorIndex && "Invalid access!")(static_cast <bool> (Kind == k_VectorIndex && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 690
, __extension__ __PRETTY_FUNCTION__))
;
691 return VectorIndex.Val;
692 }
693
694 StringRef getSysReg() const {
695 assert(Kind == k_SysReg && "Invalid access!")(static_cast <bool> (Kind == k_SysReg && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 695
, __extension__ __PRETTY_FUNCTION__))
;
696 return StringRef(SysReg.Data, SysReg.Length);
697 }
698
699 unsigned getSysCR() const {
700 assert(Kind == k_SysCR && "Invalid access!")(static_cast <bool> (Kind == k_SysCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 700
, __extension__ __PRETTY_FUNCTION__))
;
701 return SysCRImm.Val;
702 }
703
704 unsigned getPrefetch() const {
705 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 705
, __extension__ __PRETTY_FUNCTION__))
;
706 return Prefetch.Val;
707 }
708
709 unsigned getPSBHint() const {
710 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 710
, __extension__ __PRETTY_FUNCTION__))
;
711 return PSBHint.Val;
712 }
713
714 StringRef getPSBHintName() const {
715 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 715
, __extension__ __PRETTY_FUNCTION__))
;
716 return StringRef(PSBHint.Data, PSBHint.Length);
717 }
718
719 unsigned getBTIHint() const {
720 assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 720
, __extension__ __PRETTY_FUNCTION__))
;
721 return BTIHint.Val;
722 }
723
724 StringRef getBTIHintName() const {
725 assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 725
, __extension__ __PRETTY_FUNCTION__))
;
726 return StringRef(BTIHint.Data, BTIHint.Length);
727 }
728
729 StringRef getSVCR() const {
730 assert(Kind == k_SVCR && "Invalid access!")(static_cast <bool> (Kind == k_SVCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SVCR && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 730
, __extension__ __PRETTY_FUNCTION__))
;
731 return StringRef(SVCR.Data, SVCR.Length);
732 }
733
734 StringRef getPrefetchName() const {
735 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 735
, __extension__ __PRETTY_FUNCTION__))
;
736 return StringRef(Prefetch.Data, Prefetch.Length);
737 }
738
739 AArch64_AM::ShiftExtendType getShiftExtendType() const {
740 if (Kind == k_ShiftExtend)
741 return ShiftExtend.Type;
742 if (Kind == k_Register)
743 return Reg.ShiftExtend.Type;
744 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 744)
;
745 }
746
747 unsigned getShiftExtendAmount() const {
748 if (Kind == k_ShiftExtend)
749 return ShiftExtend.Amount;
750 if (Kind == k_Register)
751 return Reg.ShiftExtend.Amount;
752 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 752)
;
753 }
754
755 bool hasShiftExtendAmount() const {
756 if (Kind == k_ShiftExtend)
757 return ShiftExtend.HasExplicitAmount;
758 if (Kind == k_Register)
759 return Reg.ShiftExtend.HasExplicitAmount;
760 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 760)
;
761 }
762
763 bool isImm() const override { return Kind == k_Immediate; }
764 bool isMem() const override { return false; }
765
766 bool isUImm6() const {
767 if (!isImm())
768 return false;
769 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
770 if (!MCE)
771 return false;
772 int64_t Val = MCE->getValue();
773 return (Val >= 0 && Val < 64);
774 }
775
776 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
777
778 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
779 return isImmScaled<Bits, Scale>(true);
780 }
781
782 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
783 DiagnosticPredicate isUImmScaled() const {
784 if (IsRange && isImmRange() &&
785 (getLastImmVal() != getFirstImmVal() + Offset))
786 return DiagnosticPredicateTy::NoMatch;
787
788 return isImmScaled<Bits, Scale, IsRange>(false);
789 }
790
791 template <int Bits, int Scale, bool IsRange = false>
792 DiagnosticPredicate isImmScaled(bool Signed) const {
793 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
794 (isImmRange() && !IsRange))
795 return DiagnosticPredicateTy::NoMatch;
796
797 int64_t Val;
798 if (isImmRange())
799 Val = getFirstImmVal();
800 else {
801 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
802 if (!MCE)
803 return DiagnosticPredicateTy::NoMatch;
804 Val = MCE->getValue();
805 }
806
807 int64_t MinVal, MaxVal;
808 if (Signed) {
809 int64_t Shift = Bits - 1;
810 MinVal = (int64_t(1) << Shift) * -Scale;
811 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
812 } else {
813 MinVal = 0;
814 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
815 }
816
817 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
818 return DiagnosticPredicateTy::Match;
819
820 return DiagnosticPredicateTy::NearMatch;
821 }
822
823 DiagnosticPredicate isSVEPattern() const {
824 if (!isImm())
825 return DiagnosticPredicateTy::NoMatch;
826 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
827 if (!MCE)
828 return DiagnosticPredicateTy::NoMatch;
829 int64_t Val = MCE->getValue();
830 if (Val >= 0 && Val < 32)
831 return DiagnosticPredicateTy::Match;
832 return DiagnosticPredicateTy::NearMatch;
833 }
834
835 DiagnosticPredicate isSVEVecLenSpecifier() const {
836 if (!isImm())
837 return DiagnosticPredicateTy::NoMatch;
838 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
839 if (!MCE)
840 return DiagnosticPredicateTy::NoMatch;
841 int64_t Val = MCE->getValue();
842 if (Val >= 0 && Val <= 1)
843 return DiagnosticPredicateTy::Match;
844 return DiagnosticPredicateTy::NearMatch;
845 }
846
847 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
848 AArch64MCExpr::VariantKind ELFRefKind;
849 MCSymbolRefExpr::VariantKind DarwinRefKind;
850 int64_t Addend;
851 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
852 Addend)) {
853 // If we don't understand the expression, assume the best and
854 // let the fixup and relocation code deal with it.
855 return true;
856 }
857
858 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
859 ELFRefKind == AArch64MCExpr::VK_LO12 ||
860 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
861 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
862 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
863 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
864 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
865 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
866 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
867 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
868 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
869 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
870 // Note that we don't range-check the addend. It's adjusted modulo page
871 // size when converted, so there is no "out of range" condition when using
872 // @pageoff.
873 return true;
874 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
875 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
876 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
877 return Addend == 0;
878 }
879
880 return false;
881 }
882
883 template <int Scale> bool isUImm12Offset() const {
884 if (!isImm())
885 return false;
886
887 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
888 if (!MCE)
889 return isSymbolicUImm12Offset(getImm());
890
891 int64_t Val = MCE->getValue();
892 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
893 }
894
895 template <int N, int M>
896 bool isImmInRange() const {
897 if (!isImm())
898 return false;
899 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
900 if (!MCE)
901 return false;
902 int64_t Val = MCE->getValue();
903 return (Val >= N && Val <= M);
904 }
905
906 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
907 // a logical immediate can always be represented when inverted.
908 template <typename T>
909 bool isLogicalImm() const {
910 if (!isImm())
911 return false;
912 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
913 if (!MCE)
914 return false;
915
916 int64_t Val = MCE->getValue();
917 // Avoid left shift by 64 directly.
918 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
919 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
920 if ((Val & Upper) && (Val & Upper) != Upper)
921 return false;
922
923 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
924 }
925
926 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
927
928 bool isImmRange() const { return Kind == k_ImmRange; }
929
930 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
931 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
932 /// immediate that can be shifted by 'Shift'.
933 template <unsigned Width>
934 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
935 if (isShiftedImm() && Width == getShiftedImmShift())
936 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
937 return std::make_pair(CE->getValue(), Width);
938
939 if (isImm())
940 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
941 int64_t Val = CE->getValue();
942 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
943 return std::make_pair(Val >> Width, Width);
944 else
945 return std::make_pair(Val, 0u);
946 }
947
948 return {};
949 }
950
951 bool isAddSubImm() const {
952 if (!isShiftedImm() && !isImm())
953 return false;
954
955 const MCExpr *Expr;
956
957 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
958 if (isShiftedImm()) {
959 unsigned Shift = ShiftedImm.ShiftAmount;
960 Expr = ShiftedImm.Val;
961 if (Shift != 0 && Shift != 12)
962 return false;
963 } else {
964 Expr = getImm();
965 }
966
967 AArch64MCExpr::VariantKind ELFRefKind;
968 MCSymbolRefExpr::VariantKind DarwinRefKind;
969 int64_t Addend;
970 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
971 DarwinRefKind, Addend)) {
972 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
973 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
974 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
975 || ELFRefKind == AArch64MCExpr::VK_LO12
976 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
977 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
978 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
979 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
980 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
981 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
982 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
983 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
984 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
985 }
986
987 // If it's a constant, it should be a real immediate in range.
988 if (auto ShiftedVal = getShiftedVal<12>())
989 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
990
991 // If it's an expression, we hope for the best and let the fixup/relocation
992 // code deal with it.
993 return true;
994 }
995
996 bool isAddSubImmNeg() const {
997 if (!isShiftedImm() && !isImm())
998 return false;
999
1000 // Otherwise it should be a real negative immediate in range.
1001 if (auto ShiftedVal = getShiftedVal<12>())
1002 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1003
1004 return false;
1005 }
1006
1007 // Signed value in the range -128 to +127. For element widths of
1008 // 16 bits or higher it may also be a signed multiple of 256 in the
1009 // range -32768 to +32512.
1010 // For element-width of 8 bits a range of -128 to 255 is accepted,
1011 // since a copy of a byte can be either signed/unsigned.
1012 template <typename T>
1013 DiagnosticPredicate isSVECpyImm() const {
1014 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1015 return DiagnosticPredicateTy::NoMatch;
1016
1017 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1018 std::is_same<int8_t, T>::value;
1019 if (auto ShiftedImm = getShiftedVal<8>())
1020 if (!(IsByte && ShiftedImm->second) &&
1021 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1022 << ShiftedImm->second))
1023 return DiagnosticPredicateTy::Match;
1024
1025 return DiagnosticPredicateTy::NearMatch;
1026 }
1027
1028 // Unsigned value in the range 0 to 255. For element widths of
1029 // 16 bits or higher it may also be a signed multiple of 256 in the
1030 // range 0 to 65280.
1031 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1032 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1033 return DiagnosticPredicateTy::NoMatch;
1034
1035 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1036 std::is_same<int8_t, T>::value;
1037 if (auto ShiftedImm = getShiftedVal<8>())
1038 if (!(IsByte && ShiftedImm->second) &&
1039 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1040 << ShiftedImm->second))
1041 return DiagnosticPredicateTy::Match;
1042
1043 return DiagnosticPredicateTy::NearMatch;
1044 }
1045
1046 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1047 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1048 return DiagnosticPredicateTy::Match;
1049 return DiagnosticPredicateTy::NoMatch;
1050 }
1051
1052 bool isCondCode() const { return Kind == k_CondCode; }
1053
1054 bool isSIMDImmType10() const {
1055 if (!isImm())
1056 return false;
1057 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1058 if (!MCE)
1059 return false;
1060 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1061 }
1062
1063 template<int N>
1064 bool isBranchTarget() const {
1065 if (!isImm())
1066 return false;
1067 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1068 if (!MCE)
1069 return true;
1070 int64_t Val = MCE->getValue();
1071 if (Val & 0x3)
1072 return false;
1073 assert(N > 0 && "Branch target immediate cannot be 0 bits!")(static_cast <bool> (N > 0 && "Branch target immediate cannot be 0 bits!"
) ? void (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1073
, __extension__ __PRETTY_FUNCTION__))
;
1074 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1075 }
1076
1077 bool
1078 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1079 if (!isImm())
1080 return false;
1081
1082 AArch64MCExpr::VariantKind ELFRefKind;
1083 MCSymbolRefExpr::VariantKind DarwinRefKind;
1084 int64_t Addend;
1085 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1086 DarwinRefKind, Addend)) {
1087 return false;
1088 }
1089 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1090 return false;
1091
1092 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1093 }
1094
1095 bool isMovWSymbolG3() const {
1096 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1097 }
1098
1099 bool isMovWSymbolG2() const {
1100 return isMovWSymbol(
1101 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1102 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1103 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1104 AArch64MCExpr::VK_DTPREL_G2});
1105 }
1106
1107 bool isMovWSymbolG1() const {
1108 return isMovWSymbol(
1109 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1110 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1111 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1112 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1113 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1114 }
1115
1116 bool isMovWSymbolG0() const {
1117 return isMovWSymbol(
1118 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1119 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1120 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1121 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1122 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1123 }
1124
1125 template<int RegWidth, int Shift>
1126 bool isMOVZMovAlias() const {
1127 if (!isImm()) return false;
1128
1129 const MCExpr *E = getImm();
1130 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1131 uint64_t Value = CE->getValue();
1132
1133 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1134 }
1135 // Only supports the case of Shift being 0 if an expression is used as an
1136 // operand
1137 return !Shift && E;
1138 }
1139
1140 template<int RegWidth, int Shift>
1141 bool isMOVNMovAlias() const {
1142 if (!isImm()) return false;
1143
1144 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1145 if (!CE) return false;
1146 uint64_t Value = CE->getValue();
1147
1148 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1149 }
1150
1151 bool isFPImm() const {
1152 return Kind == k_FPImm &&
1153 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1154 }
1155
1156 bool isBarrier() const {
1157 return Kind == k_Barrier && !getBarriernXSModifier();
1158 }
1159 bool isBarriernXS() const {
1160 return Kind == k_Barrier && getBarriernXSModifier();
1161 }
1162 bool isSysReg() const { return Kind == k_SysReg; }
1163
1164 bool isMRSSystemRegister() const {
1165 if (!isSysReg()) return false;
1166
1167 return SysReg.MRSReg != -1U;
1168 }
1169
1170 bool isMSRSystemRegister() const {
1171 if (!isSysReg()) return false;
1172 return SysReg.MSRReg != -1U;
1173 }
1174
1175 bool isSystemPStateFieldWithImm0_1() const {
1176 if (!isSysReg()) return false;
1177 return (SysReg.PStateField == AArch64PState::PAN ||
1178 SysReg.PStateField == AArch64PState::DIT ||
1179 SysReg.PStateField == AArch64PState::UAO ||
1180 SysReg.PStateField == AArch64PState::SSBS);
1181 }
1182
1183 bool isSystemPStateFieldWithImm0_15() const {
1184 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1185 return SysReg.PStateField != -1U;
1186 }
1187
1188 bool isSVCR() const {
1189 if (Kind != k_SVCR)
1190 return false;
1191 return SVCR.PStateField != -1U;
1192 }
1193
1194 bool isReg() const override {
1195 return Kind == k_Register;
1196 }
1197
1198 bool isVectorList() const { return Kind == k_VectorList; }
1199
1200 bool isScalarReg() const {
1201 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1202 }
1203
1204 bool isNeonVectorReg() const {
1205 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1206 }
1207
1208 bool isNeonVectorRegLo() const {
1209 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1210 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1211 Reg.RegNum) ||
1212 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1213 Reg.RegNum));
1214 }
1215
1216 bool isMatrix() const { return Kind == k_MatrixRegister; }
1217 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1218
1219 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1220 RegKind RK;
1221 switch (Class) {
1222 case AArch64::PPRRegClassID:
1223 case AArch64::PPR_3bRegClassID:
1224 case AArch64::PPR_p8to15RegClassID:
1225 RK = RegKind::SVEPredicateAsCounter;
1226 break;
1227 default:
1228 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1228
)
;
1229 }
1230
1231 return (Kind == k_Register && Reg.Kind == RK) &&
1232 AArch64MCRegisterClasses[Class].contains(getReg());
1233 }
1234
1235 template <unsigned Class> bool isSVEVectorReg() const {
1236 RegKind RK;
1237 switch (Class) {
1238 case AArch64::ZPRRegClassID:
1239 case AArch64::ZPR_3bRegClassID:
1240 case AArch64::ZPR_4bRegClassID:
1241 RK = RegKind::SVEDataVector;
1242 break;
1243 case AArch64::PPRRegClassID:
1244 case AArch64::PPR_3bRegClassID:
1245 RK = RegKind::SVEPredicateVector;
1246 break;
1247 default:
1248 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1248
)
;
1249 }
1250
1251 return (Kind == k_Register && Reg.Kind == RK) &&
1252 AArch64MCRegisterClasses[Class].contains(getReg());
1253 }
1254
1255 template <unsigned Class> bool isFPRasZPR() const {
1256 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1257 AArch64MCRegisterClasses[Class].contains(getReg());
1258 }
1259
1260 template <int ElementWidth, unsigned Class>
1261 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1262 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1263 return DiagnosticPredicateTy::NoMatch;
1264
1265 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1266 return DiagnosticPredicateTy::Match;
1267
1268 return DiagnosticPredicateTy::NearMatch;
1269 }
1270
1271 template <int ElementWidth, unsigned Class>
1272 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1273 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1274 return DiagnosticPredicateTy::NoMatch;
1275
1276 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1277 return DiagnosticPredicateTy::Match;
1278
1279 return DiagnosticPredicateTy::NearMatch;
1280 }
1281
1282 template <int ElementWidth, unsigned Class>
1283 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1284 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1285 return DiagnosticPredicateTy::NoMatch;
1286
1287 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1288 return DiagnosticPredicateTy::Match;
1289
1290 return DiagnosticPredicateTy::NearMatch;
1291 }
1292
1293 template <int ElementWidth, unsigned Class,
1294 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1295 bool ShiftWidthAlwaysSame>
1296 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1297 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1298 if (!VectorMatch.isMatch())
1299 return DiagnosticPredicateTy::NoMatch;
1300
1301 // Give a more specific diagnostic when the user has explicitly typed in
1302 // a shift-amount that does not match what is expected, but for which
1303 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1304 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1305 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1306 ShiftExtendTy == AArch64_AM::SXTW) &&
1307 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1308 return DiagnosticPredicateTy::NoMatch;
1309
1310 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1311 return DiagnosticPredicateTy::Match;
1312
1313 return DiagnosticPredicateTy::NearMatch;
1314 }
1315
1316 bool isGPR32as64() const {
1317 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1318 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1319 }
1320
1321 bool isGPR64as32() const {
1322 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1323 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1324 }
1325
1326 bool isGPR64x8() const {
1327 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1328 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1329 Reg.RegNum);
1330 }
1331
1332 bool isWSeqPair() const {
1333 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1334 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1335 Reg.RegNum);
1336 }
1337
1338 bool isXSeqPair() const {
1339 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1340 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1341 Reg.RegNum);
1342 }
1343
1344 template<int64_t Angle, int64_t Remainder>
1345 DiagnosticPredicate isComplexRotation() const {
1346 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1347
1348 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1349 if (!CE) return DiagnosticPredicateTy::NoMatch;
1350 uint64_t Value = CE->getValue();
1351
1352 if (Value % Angle == Remainder && Value <= 270)
1353 return DiagnosticPredicateTy::Match;
1354 return DiagnosticPredicateTy::NearMatch;
1355 }
1356
1357 template <unsigned RegClassID> bool isGPR64() const {
1358 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1359 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1360 }
1361
1362 template <unsigned RegClassID, int ExtWidth>
1363 DiagnosticPredicate isGPR64WithShiftExtend() const {
1364 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1365 return DiagnosticPredicateTy::NoMatch;
1366
1367 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1368 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1369 return DiagnosticPredicateTy::Match;
1370 return DiagnosticPredicateTy::NearMatch;
1371 }
1372
1373 /// Is this a vector list with the type implicit (presumably attached to the
1374 /// instruction itself)?
1375 template <RegKind VectorKind, unsigned NumRegs>
1376 bool isImplicitlyTypedVectorList() const {
1377 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1378 VectorList.NumElements == 0 &&
1379 VectorList.RegisterKind == VectorKind;
1380 }
1381
1382 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1383 unsigned ElementWidth>
1384 bool isTypedVectorList() const {
1385 if (Kind != k_VectorList)
1386 return false;
1387 if (VectorList.Count != NumRegs)
1388 return false;
1389 if (VectorList.RegisterKind != VectorKind)
1390 return false;
1391 if (VectorList.ElementWidth != ElementWidth)
1392 return false;
1393 return VectorList.NumElements == NumElements;
1394 }
1395
1396 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1397 unsigned ElementWidth>
1398 DiagnosticPredicate isTypedVectorListMultiple() const {
1399 bool Res =
1400 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1401 if (!Res)
1402 return DiagnosticPredicateTy::NoMatch;
1403 if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
1404 return DiagnosticPredicateTy::NearMatch;
1405 return DiagnosticPredicateTy::Match;
1406 }
1407
1408 template <int Min, int Max>
1409 DiagnosticPredicate isVectorIndex() const {
1410 if (Kind != k_VectorIndex)
1411 return DiagnosticPredicateTy::NoMatch;
1412 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1413 return DiagnosticPredicateTy::Match;
1414 return DiagnosticPredicateTy::NearMatch;
1415 }
1416
1417 bool isToken() const override { return Kind == k_Token; }
1418
1419 bool isTokenEqual(StringRef Str) const {
1420 return Kind == k_Token && getToken() == Str;
1421 }
1422 bool isSysCR() const { return Kind == k_SysCR; }
1423 bool isPrefetch() const { return Kind == k_Prefetch; }
1424 bool isPSBHint() const { return Kind == k_PSBHint; }
1425 bool isBTIHint() const { return Kind == k_BTIHint; }
1426 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1427 bool isShifter() const {
1428 if (!isShiftExtend())
1429 return false;
1430
1431 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1432 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1433 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1434 ST == AArch64_AM::MSL);
1435 }
1436
1437 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1438 if (Kind != k_FPImm)
1439 return DiagnosticPredicateTy::NoMatch;
1440
1441 if (getFPImmIsExact()) {
1442 // Lookup the immediate from table of supported immediates.
1443 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1444 assert(Desc && "Unknown enum value")(static_cast <bool> (Desc && "Unknown enum value"
) ? void (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1444
, __extension__ __PRETTY_FUNCTION__))
;
1445
1446 // Calculate its FP value.
1447 APFloat RealVal(APFloat::IEEEdouble());
1448 auto StatusOrErr =
1449 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1450 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1451 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1451
)
;
1452
1453 if (getFPImm().bitwiseIsEqual(RealVal))
1454 return DiagnosticPredicateTy::Match;
1455 }
1456
1457 return DiagnosticPredicateTy::NearMatch;
1458 }
1459
1460 template <unsigned ImmA, unsigned ImmB>
1461 DiagnosticPredicate isExactFPImm() const {
1462 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1463 if ((Res = isExactFPImm<ImmA>()))
1464 return DiagnosticPredicateTy::Match;
1465 if ((Res = isExactFPImm<ImmB>()))
1466 return DiagnosticPredicateTy::Match;
1467 return Res;
1468 }
1469
1470 bool isExtend() const {
1471 if (!isShiftExtend())
1472 return false;
1473
1474 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1475 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1476 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1477 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1478 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1479 ET == AArch64_AM::LSL) &&
1480 getShiftExtendAmount() <= 4;
1481 }
1482
1483 bool isExtend64() const {
1484 if (!isExtend())
1485 return false;
1486 // Make sure the extend expects a 32-bit source register.
1487 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1488 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1489 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1490 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1491 }
1492
1493 bool isExtendLSL64() const {
1494 if (!isExtend())
1495 return false;
1496 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1497 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1498 ET == AArch64_AM::LSL) &&
1499 getShiftExtendAmount() <= 4;
1500 }
1501
1502 template<int Width> bool isMemXExtend() const {
1503 if (!isExtend())
1504 return false;
1505 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1506 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1507 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1508 getShiftExtendAmount() == 0);
1509 }
1510
1511 template<int Width> bool isMemWExtend() const {
1512 if (!isExtend())
1513 return false;
1514 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1515 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1516 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1517 getShiftExtendAmount() == 0);
1518 }
1519
// True iff this is an arithmetic shifter (LSL/LSR/ASR) whose amount is
// strictly less than the operand width in bits.
1520 template <unsigned width>
1521 bool isArithmeticShifter() const {
1522 if (!isShifter())
1523 return false;
1524
1525 // An arithmetic shifter is LSL, LSR, or ASR.
1526 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1527 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1528 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1529 }
1530
// True iff this is a logical shifter (LSL/LSR/ASR/ROR — note ROR is allowed
// here but not for arithmetic shifters) with amount < width bits.
1531 template <unsigned width>
1532 bool isLogicalShifter() const {
1533 if (!isShifter())
1534 return false;
1535
1536 // A logical shifter is LSL, LSR, ASR or ROR.
1537 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1538 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1539 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1540 getShiftExtendAmount() < width;
1541 }
1542
// Shifter usable with a 32-bit MOV-immediate (MOVZ/MOVN/MOVK) instruction.
1543 bool isMovImm32Shifter() const {
1544 if (!isShifter())
1545 return false;
1546
1547 // A 32-bit MOVi shifter is LSL of 0 or 16 (a 32-bit register has only
1547 // two 16-bit halves).
1548 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1549 if (ST != AArch64_AM::LSL)
1550 return false;
1551 uint64_t Val = getShiftExtendAmount();
1552 return (Val == 0 || Val == 16);
1553 }
1554
// Shifter usable with a 64-bit MOV-immediate (MOVZ/MOVN/MOVK) instruction.
1555 bool isMovImm64Shifter() const {
1556 if (!isShifter())
1557 return false;
1558
1559 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 (one per 16-bit
1559 // half-word of a 64-bit register).
1560 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1561 if (ST != AArch64_AM::LSL)
1562 return false;
1563 uint64_t Val = getShiftExtendAmount();
1564 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1565 }
1566
// Shifter usable with vector logical-immediate instructions.
1567 bool isLogicalVecShifter() const {
1568 if (!isShifter())
1569 return false;
1570
1571 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1572 unsigned Shift = getShiftExtendAmount();
1573 return getShiftExtendType() == AArch64_AM::LSL &&
1574 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1575 }
1576
// Half-word variant of isLogicalVecShifter: further restricts the shift to
// the amounts representable in a 16-bit element (0 or 8).
1577 bool isLogicalVecHalfWordShifter() const {
1578 if (!isLogicalVecShifter())
1579 return false;
1580
1581 // A logical vector half-word shifter is a left shift by 0 or 8.
1582 unsigned Shift = getShiftExtendAmount();
1583 return getShiftExtendType() == AArch64_AM::LSL &&
1584 (Shift == 0 || Shift == 8);
1585 }
1586
// Shifter usable with vector move-immediate (MSL "masking shift left") forms.
1587 bool isMoveVecShifter() const {
1588 if (!isShiftExtend())
1589 return false;
1590
1591 // A move vector shifter is an MSL (not LSL) left shift by 8 or 16.
1592 unsigned Shift = getShiftExtendAmount();
1593 return getShiftExtendType() == AArch64_AM::MSL &&
1594 (Shift == 8 || Shift == 16);
1595 }
1596
1597 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1598 // to LDUR/STUR when the offset is not legal for the former but is for
1599 // the latter. As such, in addition to checking for being a legal unscaled
1600 // address, also check that it is not a legal scaled address. This avoids
1601 // ambiguity in the matcher.
1602 template<int Width>
1603 bool isSImm9OffsetFB() const {
1604 // Width is the access size in bits; Width/8 is the scale of the
1604 // corresponding scaled (UImm12) addressing form.
1605 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1606 }
1606
// Checks an ADRP label operand: a constant must be 4 KiB page-aligned and
// within the signed 21-bit page range; non-constant expressions are accepted
// as-is (resolved later by the fixup machinery).
1607 bool isAdrpLabel() const {
1608 // Validation was handled during parsing, so we just verify that
1609 // something didn't go haywire.
1610 if (!isImm())
1611 return false;
1612
1613 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1614 int64_t Val = CE->getValue();
1615 int64_t Min = - (4096 * (1LL << (21 - 1)));
1616 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1617 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1618 }
1619
1620 return true;
1621 }
1622
// Checks an ADR label operand: a constant must fit the signed 21-bit byte
// range; non-constant expressions are accepted as-is.
1623 bool isAdrLabel() const {
1624 // Validation was handled during parsing, so we just verify that
1625 // something didn't go haywire.
1626 if (!isImm())
1627 return false;
1628
1629 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1630 int64_t Val = CE->getValue();
1631 int64_t Min = - (1LL << (21 - 1));
1632 int64_t Max = ((1LL << (21 - 1)) - 1);
1633 return Val >= Min && Val <= Max;
1634 }
1635
1636 return true;
1637 }
1638
// Diagnostic predicate for SME matrix register operands: NoMatch if this is
// not a matrix operand at all; NearMatch if it is one but the kind, register
// class, or element width disagrees with the template parameters.
1639 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1640 DiagnosticPredicate isMatrixRegOperand() const {
1641 if (!isMatrix())
1642 return DiagnosticPredicateTy::NoMatch;
1643 if (getMatrixKind() != Kind ||
1644 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1645 EltSize != getMatrixElementWidth())
1646 return DiagnosticPredicateTy::NearMatch;
1647 return DiagnosticPredicateTy::Match;
1648 }
1649
// Append Expr to Inst, folding constants to plain immediate operands.
1650 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1651 // Add as immediates when possible. Null MCExpr = 0.
1652 if (!Expr)
1653 Inst.addOperand(MCOperand::createImm(0));
1654 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1655 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1656 else
1657 Inst.addOperand(MCOperand::createExpr(Expr));
1658 }
1659
// Emit this operand's register into Inst as a single register MCOperand.
// (The assert below appears macro-expanded in this analyzer dump.)
1660 void addRegOperands(MCInst &Inst, unsigned N) const {
1661 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1661
, __extension__ __PRETTY_FUNCTION__))
;
1662 Inst.addOperand(MCOperand::createReg(getReg()));
1663 }
1664
// Emit this operand's SME matrix register into Inst.
1665 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1666 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1666
, __extension__ __PRETTY_FUNCTION__))
;
1667 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1668 }
1669
// The operand holds a 64-bit GPR; emit the 32-bit GPR with the same encoding
// (e.g. X3 -> W3) by round-tripping through the register's encoding value.
1670 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1671 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1671
, __extension__ __PRETTY_FUNCTION__))
;
1672 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1673
, __extension__ __PRETTY_FUNCTION__))
1673 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1673
, __extension__ __PRETTY_FUNCTION__))
;
1674
1675 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1676 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1677 RI->getEncodingValue(getReg()));
1678
1679 Inst.addOperand(MCOperand::createReg(Reg));
1680 }
1681
// Inverse of addGPR32as64Operands: the operand holds a 32-bit GPR; emit the
// 64-bit GPR with the same encoding (e.g. W3 -> X3).
1682 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1683 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1683
, __extension__ __PRETTY_FUNCTION__))
;
1684 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1685
, __extension__ __PRETTY_FUNCTION__))
1685 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1685
, __extension__ __PRETTY_FUNCTION__))
;
1686
1687 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1688 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1689 RI->getEncodingValue(getReg()));
1690
1691 Inst.addOperand(MCOperand::createReg(Reg));
1692 }
1693
// The operand holds an FPR (B/H/S/D/Q) of the given bit Width; emit the SVE
// Z register with the same index by offsetting from the class's base reg.
1694 template <int Width>
1695 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1696 unsigned Base;
1697 switch (Width) {
1698 case 8: Base = AArch64::B0; break;
1699 case 16: Base = AArch64::H0; break;
1700 case 32: Base = AArch64::S0; break;
1701 case 64: Base = AArch64::D0; break;
1702 case 128: Base = AArch64::Q0; break;
1703 default:
1704 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1704)
;
1705 }
1706 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1707 }
1708
// The operand was parsed as a 128-bit Q register; emit the corresponding
// 64-bit D register with the same index.
1709 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1710 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1710
, __extension__ __PRETTY_FUNCTION__))
;
1711 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1712
, __extension__ __PRETTY_FUNCTION__))
1712 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1712
, __extension__ __PRETTY_FUNCTION__))
;
1713 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1714 }
1715
// Emit the 128-bit Q register unchanged (it must already be in FPR128).
1716 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1717 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1717
, __extension__ __PRETTY_FUNCTION__))
;
1718 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1719
, __extension__ __PRETTY_FUNCTION__))
1719 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1719
, __extension__ __PRETTY_FUNCTION__))
;
1720 Inst.addOperand(MCOperand::createReg(getReg()));
1721 }
1722
// Emit the register unchanged; unlike the 64/128-bit variants above, no
// register-class assertion is performed here.
1723 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1724 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1724
, __extension__ __PRETTY_FUNCTION__))
;
1725 Inst.addOperand(MCOperand::createReg(getReg()));
1726 }
1727
// Row index into the FirstRegs table of addVectorListOperands, selecting the
// register family a vector list is built from.
1728 enum VecListIndexType {
1729 VecListIdx_DReg = 0, // 64-bit NEON D registers
1730 VecListIdx_QReg = 1, // 128-bit NEON Q registers
1731 VecListIdx_ZReg = 2, // SVE Z registers
1732 VecListIdx_PReg = 3, // SVE predicate P registers
1733 };
1734
// Emit a vector-list operand (e.g. "{ v0.8b, v1.8b }") as the tuple
// pseudo-register for NumRegs consecutive registers. FirstRegs[RegTy][0] is
// the base the parsed start register is relative to; FirstRegs[RegTy][NumRegs]
// is the first tuple register of the requested list length.
1735 template <VecListIndexType RegTy, unsigned NumRegs>
1736 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1737 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1737
, __extension__ __PRETTY_FUNCTION__))
;
1738 static const unsigned FirstRegs[][5] = {
1739 /* DReg */ { AArch64::Q0,
1740 AArch64::D0, AArch64::D0_D1,
1741 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1742 /* QReg */ { AArch64::Q0,
1743 AArch64::Q0, AArch64::Q0_Q1,
1744 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1745 /* ZReg */ { AArch64::Z0,
1746 AArch64::Z0, AArch64::Z0_Z1,
1747 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1748 /* PReg */ { AArch64::P0,
1749 AArch64::P0, AArch64::P0_P1 }
1750 };
1751
1752 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs
<= 4) && " NumRegs must be <= 4 for ZRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1753
, __extension__ __PRETTY_FUNCTION__))
1753 " NumRegs must be <= 4 for ZRegs")(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs
<= 4) && " NumRegs must be <= 4 for ZRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1753
, __extension__ __PRETTY_FUNCTION__))
;
1754
1755 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&(static_cast <bool> ((RegTy != VecListIdx_PReg || NumRegs
<= 2) && " NumRegs must be <= 2 for PRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_PReg || NumRegs <= 2) && \" NumRegs must be <= 2 for PRegs\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1756
, __extension__ __PRETTY_FUNCTION__))
1756 " NumRegs must be <= 2 for PRegs")(static_cast <bool> ((RegTy != VecListIdx_PReg || NumRegs
<= 2) && " NumRegs must be <= 2 for PRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_PReg || NumRegs <= 2) && \" NumRegs must be <= 2 for PRegs\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1756
, __extension__ __PRETTY_FUNCTION__))
;
1757
1758 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1759 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1760 FirstRegs[(unsigned)RegTy][0]));
1761 }
1762
// Emit an SME tile list as an 8-bit register-mask immediate.
1763 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1764 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1764
, __extension__ __PRETTY_FUNCTION__))
;
1765 unsigned RegMask = getMatrixTileListRegMask();
1766 assert(RegMask <= 0xFF && "Invalid mask!")(static_cast <bool> (RegMask <= 0xFF && "Invalid mask!"
) ? void (0) : __assert_fail ("RegMask <= 0xFF && \"Invalid mask!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1766
, __extension__ __PRETTY_FUNCTION__))
;
1767 Inst.addOperand(MCOperand::createImm(RegMask));
1768 }
1769
// Emit a vector lane index (e.g. the "[2]" in v0.s[2]) as an immediate.
1770 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1771 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1771
, __extension__ __PRETTY_FUNCTION__))
;
1772 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1773 }
1774
// Emit 0 or 1 for an "exact FP immediate" operand: 1 iff the value matches
// the ImmIs1 alternative (so the encoded bit selects between the two).
1775 template <unsigned ImmIs0, unsigned ImmIs1>
1776 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1777 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1777
, __extension__ __PRETTY_FUNCTION__))
;
1778 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")(static_cast <bool> (bool(isExactFPImm<ImmIs0, ImmIs1
>()) && "Invalid operand") ? void (0) : __assert_fail
("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1778
, __extension__ __PRETTY_FUNCTION__))
;
1779 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1780 }
1781
// Emit the immediate expression via addExpr (constants fold to plain imms).
1782 void addImmOperands(MCInst &Inst, unsigned N) const {
1783 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1783
, __extension__ __PRETTY_FUNCTION__))
;
1784 // If this is a pageoff symrefexpr with an addend, adjust the addend
1785 // to be only the page-offset portion. Otherwise, just add the expr
1786 // as-is.
1787 addExpr(Inst, getImm());
1788 }
1789
// Emit an (immediate, shift) pair. Preference order: a value re-expressible
// as (imm << Shift) via getShiftedVal; then an explicitly parsed shifted
// immediate; otherwise the plain immediate with a shift of 0.
1790 template <int Shift>
1791 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1792 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1792
, __extension__ __PRETTY_FUNCTION__))
;
1793 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1794 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1795 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1796 } else if (isShiftedImm()) {
1797 addExpr(Inst, getShiftedImmVal());
1798 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1799 } else {
1800 addExpr(Inst, getImm());
1801 Inst.addOperand(MCOperand::createImm(0));
1802 }
1803 }
1804
// Negated variant of the above: emit (-imm, shift). The operand must have
// been matched as a shifted immediate; anything else is a matcher bug.
1805 template <int Shift>
1806 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1807 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1807
, __extension__ __PRETTY_FUNCTION__))
;
1808 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1809 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1810 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1811 } else
1812 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1812
)
;
1813 }
1814
// Emit the condition code (EQ, NE, ...) as an immediate operand.
1815 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1816 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1816
, __extension__ __PRETTY_FUNCTION__))
;
1817 Inst.addOperand(MCOperand::createImm(getCondCode()));
1818 }
1819
// Emit an ADRP label: constants are converted to a page count (>> 12);
// symbolic expressions are added unchanged for later fixup resolution.
1820 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1821 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1821
, __extension__ __PRETTY_FUNCTION__))
;
1822 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1823 if (!MCE)
1824 addExpr(Inst, getImm());
1825 else
1826 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1827 }
1828
// ADR labels are byte offsets, so the plain immediate path suffices.
1829 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1830 addImmOperands(Inst, N);
1831 }
1832
// Emit a scaled 12-bit unsigned offset: constants are divided by Scale
// (bytes per unit); non-constant expressions are added unscaled for fixups.
1833 template<int Scale>
1834 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1835 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1835
, __extension__ __PRETTY_FUNCTION__))
;
1836 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1837
1838 if (!MCE) {
1839 Inst.addOperand(MCOperand::createExpr(getImm()));
1840 return;
1841 }
1842 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1843 }
1844
// Emit a 6-bit unsigned immediate; the operand must be a constant (cast<>).
1845 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1846 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1846
, __extension__ __PRETTY_FUNCTION__))
;
1847 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1848 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1849 }
1850
// Emit a constant immediate divided by Scale (must be a constant expr).
1851 template <int Scale>
1852 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1853 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1853
, __extension__ __PRETTY_FUNCTION__))
;
1854 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1855 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1856 }
1857
// Emit the first value of an immediate range, divided by Scale.
1858 template <int Scale>
1859 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1860 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1860
, __extension__ __PRETTY_FUNCTION__))
;
1861 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
1862 }
1863
// Emit a logical (bitmask) immediate encoded for a sizeof(T)*8-bit operand;
// T selects the element width and the value is treated as unsigned.
1864 template <typename T>
1865 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1866
, __extension__ __PRETTY_FUNCTION__))
;
1867 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1868 std::make_unsigned_t<T> Val = MCE->getValue();
1869 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1870 Inst.addOperand(MCOperand::createImm(encoding));
1871 }
1872
// As addLogicalImmOperands, but encodes the bitwise complement of the value
// (used by aliases whose underlying instruction takes the inverted mask).
1873 template <typename T>
1874 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1875
, __extension__ __PRETTY_FUNCTION__))
;
1876 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1877 std::make_unsigned_t<T> Val = ~MCE->getValue();
1878 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1879 Inst.addOperand(MCOperand::createImm(encoding));
1880 }
1881
// Emit an AdvSIMD modified-immediate (type 10) encoding of the constant.
1882 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1883 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1883
, __extension__ __PRETTY_FUNCTION__))
;
1884 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1885 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1886 Inst.addOperand(MCOperand::createImm(encoding));
1887 }
1888
// Emit a 26-bit branch target: constants are converted to word offsets
// (>> 2); label expressions are added as-is for the fixup to resolve.
1889 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1890 // Branch operands don't encode the low bits, so shift them off
1891 // here. If it's a label, however, just put it on directly as there's
1892 // not enough information now to do anything.
1893 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1893
, __extension__ __PRETTY_FUNCTION__))
;
1894 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1895 if (!MCE) {
1896 addExpr(Inst, getImm());
1897 return;
1898 }
1899 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1899
, __extension__ __PRETTY_FUNCTION__))
;
1900 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1901 }
1902
// Emit a 19-bit PC-relative label, same constant-vs-expression handling as
// addBranchTarget26Operands.
1903 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1904 // Branch operands don't encode the low bits, so shift them off
1905 // here. If it's a label, however, just put it on directly as there's
1906 // not enough information now to do anything.
1907 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1907
, __extension__ __PRETTY_FUNCTION__))
;
1908 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1909 if (!MCE) {
1910 addExpr(Inst, getImm());
1911 return;
1912 }
1913 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1913
, __extension__ __PRETTY_FUNCTION__))
;
1914 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1915 }
1916
// Emit a 14-bit branch target (TBZ/TBNZ-style), same handling as above.
1917 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1918 // Branch operands don't encode the low bits, so shift them off
1919 // here. If it's a label, however, just put it on directly as there's
1920 // not enough information now to do anything.
1921 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1921
, __extension__ __PRETTY_FUNCTION__))
;
1922 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1923 if (!MCE) {
1924 addExpr(Inst, getImm());
1925 return;
1926 }
1927 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1927
, __extension__ __PRETTY_FUNCTION__))
;
1928 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1929 }
1930
// Emit the FP immediate in the 8-bit FMOV encoding (via getFP64Imm).
1931 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1932 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1932
, __extension__ __PRETTY_FUNCTION__))
;
1933 Inst.addOperand(MCOperand::createImm(
1934 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1935 }
1936
// Emit a barrier option (DSB/DMB/ISB operand) as an immediate.
1937 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1938 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1938
, __extension__ __PRETTY_FUNCTION__))
;
1939 Inst.addOperand(MCOperand::createImm(getBarrier()));
1940 }
1941
// Emit an nXS barrier option; shares getBarrier() with addBarrierOperands.
1942 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1943 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1943
, __extension__ __PRETTY_FUNCTION__))
;
1944 Inst.addOperand(MCOperand::createImm(getBarrier()));
1945 }
1946
// Emit the MRS (read) encoding of the system register.
1947 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1948 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1948
, __extension__ __PRETTY_FUNCTION__))
;
1949
1950 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1951 }
1952
// Emit the MSR (write) encoding of the system register.
1953 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1954 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1954
, __extension__ __PRETTY_FUNCTION__))
;
1955
1956 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1957 }
1958
// Emit the PState field for MSR-immediate forms taking a 0/1 operand.
1959 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1960 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1960
, __extension__ __PRETTY_FUNCTION__))
;
1961
1962 Inst.addOperand(MCOperand::createImm(SysReg.PStateField))
;
1963 }
1964
// Emit the SME SVCR pstate field (from the SVCR union member, not SysReg).
1965 void addSVCROperands(MCInst &Inst, unsigned N) const {
1966 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1966
, __extension__ __PRETTY_FUNCTION__))
;
1967
1968 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1969 }
1970
// Emit the PState field for MSR-immediate forms taking a 0..15 operand.
1971 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1972 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1972
, __extension__ __PRETTY_FUNCTION__))
;
1973
1974 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1975 }
1976
// Emit a SYS instruction Cn/Cm field as an immediate.
1977 void addSysCROperands(MCInst &Inst, unsigned N) const {
1978 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1978
, __extension__ __PRETTY_FUNCTION__))
;
1979 Inst.addOperand(MCOperand::createImm(getSysCR()));
1980 }
1981
// Emit a PRFM prefetch-operation code as an immediate.
1982 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1983 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1983
, __extension__ __PRETTY_FUNCTION__))
;
1984 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1985 }
1986
// Emit a PSB hint operand as an immediate.
1987 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1988 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1988
, __extension__ __PRETTY_FUNCTION__))
;
1989 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1990 }
1991
// Emit a BTI hint operand as an immediate.
1992 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1993 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1993
, __extension__ __PRETTY_FUNCTION__))
;
1994 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1995 }
1996
// Emit the (type, amount) shifter pair packed into one immediate via
// AArch64_AM::getShifterImm.
1997 void addShifterOperands(MCInst &Inst, unsigned N) const {
1998 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1998
, __extension__ __PRETTY_FUNCTION__))
;
1999 unsigned Imm =
2000 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2001 Inst.addOperand(MCOperand::createImm(Imm));
2002 }
2003
// Emit an arithmetic extend operand for 32-bit GPR forms. A plain LSL is
// canonicalized to UXTW before packing type+amount into one immediate.
2004 void addExtendOperands(MCInst &Inst, unsigned N) const {
2005 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2005
, __extension__ __PRETTY_FUNCTION__))
;
2006 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2007 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2008 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2009 Inst.addOperand(MCOperand::createImm(Imm));
2010 }
2011
// Emit an arithmetic extend operand for 64-bit GPR forms. A plain LSL is
// canonicalized to UXTX (the 64-bit no-op extend) before packing.
2012 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2013 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2013
, __extension__ __PRETTY_FUNCTION__))
;
2014 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2015 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2016 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2017 Inst.addOperand(MCOperand::createImm(Imm));
2018 }
2019
// Emit a memory-extend operand as two immediates: a "signed extend?" flag
// (SXTW/SXTX) followed by a "shift present?" flag (amount != 0).
2020 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2021 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2021
, __extension__ __PRETTY_FUNCTION__))
;
2022 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2023 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2024 Inst.addOperand(MCOperand::createImm(IsSigned));
2025 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2026 }
2027
2028 // For 8-bit load/store instructions with a register offset, both the
2029 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2030 // they're disambiguated by whether the shift was explicit or implicit rather
2031 // than its size.
// Like addMemExtendOperands, but the second immediate records whether an
// explicit shift amount was written, not whether the amount is nonzero.
2032 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2033 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2033
, __extension__ __PRETTY_FUNCTION__))
;
2034 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2035 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2036 Inst.addOperand(MCOperand::createImm(IsSigned));
2037 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2038 }
2039
// For the MOV alias of MOVZ: extract the 16-bit chunk at bit position Shift
// from a constant immediate. Non-constant expressions (e.g. relocatable
// symbols) are passed through unchanged via addExpr.
2040 template<int Shift>
2041 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2042 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2042
, __extension__ __PRETTY_FUNCTION__))
;
2043
2044 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2045 if (CE) {
2046 uint64_t Value = CE->getValue();
2047 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2048 } else {
2049 addExpr(Inst, getImm());
2050 }
2051 }
2052
// For the MOV alias of MOVN: the encoded chunk is the bitwise complement of
// the value's 16-bit chunk at Shift. Unlike the MOVZ variant, the immediate
// must already be a constant here (cast<>, not dyn_cast<>).
2053 template<int Shift>
2054 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2055 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2055
, __extension__ __PRETTY_FUNCTION__))
;
2056
2057 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2058 uint64_t Value = CE->getValue();
2059 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2060 }
2061
// Complex-number rotation that must be a multiple of 90; encode as value/90.
2062 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2063 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2063
, __extension__ __PRETTY_FUNCTION__))
;
2064 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2065 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2066 }
2067
// Complex-number rotation of 90 or 270; encode as (value - 90) / 180
// so 90 -> 0 and 270 -> 1.
2068 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2069 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2069
, __extension__ __PRETTY_FUNCTION__))
;
2070 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2071 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2072 }
2073
2074 void print(raw_ostream &OS) const override;
2075
// Factory: build a k_Token operand referencing (not copying) Str's bytes;
// Str must outlive the operand. Tokens have a zero-width location (End == S).
2076 static std::unique_ptr<AArch64Operand>
2077 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2078 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2079 Op->Tok.Data = Str.data();
2080 Op->Tok.Length = Str.size();
2081 Op->Tok.IsSuffix = IsSuffix;
2082 Op->StartLoc = S;
2083 Op->EndLoc = S;
2084 return Op;
2085 }
2086
// Factory: build a k_Register operand. ElementWidth defaults to 0 (scalar);
// CreateVectorReg overwrites it. Optional shift/extend info is stored for
// registers written with an inline extend (e.g. "x0, uxtw #2").
2087 static std::unique_ptr<AArch64Operand>
2088 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2089 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2090 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2091 unsigned ShiftAmount = 0,
2092 unsigned HasExplicitAmount = false) {
2093 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2094 Op->Reg.RegNum = RegNum;
2095 Op->Reg.Kind = Kind;
2096 Op->Reg.ElementWidth = 0;
2097 Op->Reg.EqualityTy = EqTy;
2098 Op->Reg.ShiftExtend.Type = ExtTy;
2099 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2100 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2101 Op->StartLoc = S;
2102 Op->EndLoc = E;
2103 return Op;
2104 }
2105
// Factory: build a vector-register operand on top of CreateReg, additionally
// recording the element width in bits. Only vector-ish RegKinds are allowed
// (NEON, SVE data, SVE predicate, SVE predicate-as-counter).
2106 static std::unique_ptr<AArch64Operand>
2107 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2108 SMLoc S, SMLoc E, MCContext &Ctx,
2109 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2110 unsigned ShiftAmount = 0,
2111 unsigned HasExplicitAmount = false) {
2112 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
|| Kind == RegKind::SVEPredicateAsCounter) && "Invalid vector kind"
) ? void (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector || Kind == RegKind::SVEPredicateAsCounter) && \"Invalid vector kind\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2115
, __extension__ __PRETTY_FUNCTION__))
2113 Kind == RegKind::SVEPredicateVector ||(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
|| Kind == RegKind::SVEPredicateAsCounter) && "Invalid vector kind"
) ? void (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector || Kind == RegKind::SVEPredicateAsCounter) && \"Invalid vector kind\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2115
, __extension__ __PRETTY_FUNCTION__))
2114 Kind == RegKind::SVEPredicateAsCounter) &&(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
|| Kind == RegKind::SVEPredicateAsCounter) && "Invalid vector kind"
) ? void (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector || Kind == RegKind::SVEPredicateAsCounter) && \"Invalid vector kind\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2115
, __extension__ __PRETTY_FUNCTION__))
2115 "Invalid vector kind")(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
|| Kind == RegKind::SVEPredicateAsCounter) && "Invalid vector kind"
) ? void (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector || Kind == RegKind::SVEPredicateAsCounter) && \"Invalid vector kind\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2115
, __extension__ __PRETTY_FUNCTION__))
;
2116 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2117 HasExplicitAmount);
2118 Op->Reg.ElementWidth = ElementWidth;
2119 return Op;
2120 }
2121
// Factory: build a k_VectorList operand (e.g. "{ v0.4s, v1.4s }"):
// first register, list length, per-register element count and width.
2122 static std::unique_ptr<AArch64Operand>
2123 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
2124 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
2125 MCContext &Ctx) {
2126 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2127 Op->VectorList.RegNum = RegNum;
2128 Op->VectorList.Count = Count;
2129 Op->VectorList.NumElements = NumElements;
2130 Op->VectorList.ElementWidth = ElementWidth;
2131 Op->VectorList.RegisterKind = RegisterKind;
2132 Op->StartLoc = S;
2133 Op->EndLoc = E;
2134 return Op;
2135 }
2136
// Factory: build a k_VectorIndex operand (the "[i]" lane selector).
2137 static std::unique_ptr<AArch64Operand>
2138 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2139 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2140 Op->VectorIndex.Val = Idx;
2141 Op->StartLoc = S;
2142 Op->EndLoc = E;
2143 return Op;
2144 }
2145
// Factory: build a k_MatrixTileList operand from a bitmask of ZA tiles.
2146 static std::unique_ptr<AArch64Operand>
2147 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2148 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2149 Op->MatrixTileList.RegMask = RegMask;
2150 Op->StartLoc = S;
2151 Op->EndLoc = E;
2152 return Op;
2153 }
2154
// Expand an SME ZA tile alias to the set of 64-bit (ZAD) tiles it overlaps,
// keyed by (element width, tile register). 64-bit tiles map to themselves;
// narrower tiles cover several ZAD tiles per the SME tile aliasing rules
// encoded in RegMap. An unknown (width, reg) pair yields an empty vector,
// which the assert below treats as a programmer error.
2155 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2156 const unsigned ElementWidth) {
2157 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2158 RegMap = {
2159 {{0, AArch64::ZAB0},
2160 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2161 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2162 {{8, AArch64::ZAB0},
2163 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2164 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2165 {{16, AArch64::ZAH0},
2166 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2167 {{16, AArch64::ZAH1},
2168 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2169 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2170 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2171 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2172 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2173 };
2174
2175 if (ElementWidth == 64)
2176 OutRegs.insert(Reg)(
2177 else {
2178 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2179 assert(!Regs.empty() && "Invalid tile or element width!")(static_cast <bool> (!Regs.empty() && "Invalid tile or element width!"
) ? void (0) : __assert_fail ("!Regs.empty() && \"Invalid tile or element width!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2179
, __extension__ __PRETTY_FUNCTION__))
;
2180 for (auto OutReg : Regs)
2181 OutRegs.insert(OutReg);
2182 }
2183 }
2184
// Factory: build a k_Immediate operand wrapping an MCExpr.
2185 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2186 SMLoc E, MCContext &Ctx) {
2187 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2188 Op->Imm.Val = Val;
2189 Op->StartLoc = S;
2190 Op->EndLoc = E;
2191 return Op;
2192 }
2193
// Factory: build a k_ShiftedImm operand ("#imm, lsl #shift").
2194 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2195 unsigned ShiftAmount,
2196 SMLoc S, SMLoc E,
2197 MCContext &Ctx) {
2198 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2199 Op->ShiftedImm .Val = Val;
2200 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2201 Op->StartLoc = S;
2202 Op->EndLoc = E;
2203 return Op;
2204 }
2205
// Factory: build a k_ImmRange operand ("first:last", used by SME2 range
// syntax). Records both endpoints of the range.
//
// Fix: unlike every sibling factory, this one never initialized StartLoc,
// leaving it default-constructed; diagnostics that call getStartLoc() on an
// ImmRange operand would point at an invalid location. Set StartLoc = S.
2206 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2207 unsigned Last, SMLoc S,
2208 SMLoc E,
2209 MCContext &Ctx) {
2210 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2211 Op->ImmRange.First = First;
2212 Op->ImmRange.Last = Last;
 Op->StartLoc = S;
2213 Op->EndLoc = E;
2214 return Op;
2215 }
2216
// Factory: build a k_CondCode operand (eq, ne, lt, ...).
2217 static std::unique_ptr<AArch64Operand>
2218 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2219 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2220 Op->CondCode.Code = Code;
2221 Op->StartLoc = S;
2222 Op->EndLoc = E;
2223 return Op;
2224 }
2225
// Factory: build a k_FPImm operand; the float is stored as its sign-extended
// bit pattern. IsExact records whether the parsed literal was representable
// without rounding.
2226 static std::unique_ptr<AArch64Operand>
2227 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2228 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2229 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2230 Op->FPImm.IsExact = IsExact;
2231 Op->StartLoc = S;
2232 Op->EndLoc = S;
2233 return Op;
2234 }
2235
// Factory: build a k_Barrier operand (DSB/DMB/ISB option). Str's bytes are
// referenced, not copied; HasnXSModifier marks the FEAT_XS "nXS" variants.
2236 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2237 StringRef Str,
2238 SMLoc S,
2239 MCContext &Ctx,
2240 bool HasnXSModifier) {
2241 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2242 Op->Barrier.Val = Val;
2243 Op->Barrier.Data = Str.data();
2244 Op->Barrier.Length = Str.size();
2245 Op->Barrier.HasnXSModifier = HasnXSModifier;
2246 Op->StartLoc = S;
2247 Op->EndLoc = S;
2248 return Op;
2249 }
2250
// Factory: build a k_SysReg operand carrying separate encodings for MRS
// (read), MSR (write), and MSR-immediate (PState) uses of the register name.
2251 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2252 uint32_t MRSReg,
2253 uint32_t MSRReg,
2254 uint32_t PStateField,
2255 MCContext &Ctx) {
2256 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2257 Op->SysReg.Data = Str.data();
2258 Op->SysReg.Length = Str.size();
2259 Op->SysReg.MRSReg = MRSReg;
2260 Op->SysReg.MSRReg = MSRReg;
2261 Op->SysReg.PStateField = PStateField;
2262 Op->StartLoc = S;
2263 Op->EndLoc = S;
2264 return Op;
2265 }
2266
// Factory: build a k_SysCR operand (the "cN" field of SYS/AT/TLBI etc.).
2267 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2268 SMLoc E, MCContext &Ctx) {
2269 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2270 Op->SysCRImm.Val = Val;
2271 Op->StartLoc = S;
2272 Op->EndLoc = E;
2273 return Op;
2274 }
2275
// Factory: build a k_Prefetch operand (prfop).
// NOTE(review): the name string is stored through the Barrier union member
// (Barrier.Data/Length) rather than Prefetch.* — this relies on the union
// members sharing layout for their Data/Length fields. Verify the getter for
// the prefetch name reads the matching fields before "fixing" this.
2276 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2277 StringRef Str,
2278 SMLoc S,
2279 MCContext &Ctx) {
2280 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2281 Op->Prefetch.Val = Val;
2282 Op->Barrier.Data = Str.data();
2283 Op->Barrier.Length = Str.size();
2284 Op->StartLoc = S;
2285 Op->EndLoc = S;
2286 return Op;
2287 }
2288
// Factory: build a k_PSBHint operand; keeps a reference to the spelled name.
2289 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2290 StringRef Str,
2291 SMLoc S,
2292 MCContext &Ctx) {
2293 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2294 Op->PSBHint.Val = Val;
2295 Op->PSBHint.Data = Str.data();
2296 Op->PSBHint.Length = Str.size();
2297 Op->StartLoc = S;
2298 Op->EndLoc = S;
2299 return Op;
2300 }
2301
// Factory: build a k_BTIHint operand. The stored value is Val | 32 —
// presumably folding in the fixed bit of the BTI hint-immediate encoding;
// confirm against the HINT instruction encoding before relying on it.
2302 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2303 StringRef Str,
2304 SMLoc S,
2305 MCContext &Ctx) {
2306 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2307 Op->BTIHint.Val = Val | 32;
2308 Op->BTIHint.Data = Str.data();
2309 Op->BTIHint.Length = Str.size();
2310 Op->StartLoc = S;
2311 Op->EndLoc = S;
2312 return Op;
2313 }
2314
// Factory: build a k_MatrixRegister operand (SME ZA tile / slice register).
2315 static std::unique_ptr<AArch64Operand>
2316 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2317 SMLoc S, SMLoc E, MCContext &Ctx) {
2318 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2319 Op->MatrixReg.RegNum = RegNum;
2320 Op->MatrixReg.ElementWidth = ElementWidth;
2321 Op->MatrixReg.Kind = Kind;
2322 Op->StartLoc = S;
2323 Op->EndLoc = E;
2324 return Op;
2325 }
2326
// Factory: build a k_SVCR operand (SME streaming-mode control, e.g. SVCRSM).
2327 static std::unique_ptr<AArch64Operand>
2328 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2329 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2330 Op->SVCR.PStateField = PStateField;
2331 Op->SVCR.Data = Str.data();
2332 Op->SVCR.Length = Str.size();
2333 Op->StartLoc = S;
2334 Op->EndLoc = S;
2335 return Op;
2336 }
2337
// Factory: build a standalone k_ShiftExtend operand ("lsl #2", "uxtw", ...).
// HasExplicitAmount distinguishes "uxtw #0" from a bare "uxtw".
2338 static std::unique_ptr<AArch64Operand>
2339 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2340 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2341 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2342 Op->ShiftExtend.Type = ShOp;
2343 Op->ShiftExtend.Amount = Val;
2344 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2345 Op->StartLoc = S;
2346 Op->EndLoc = E;
2347 return Op;
2348 }
2349};
2350
2351} // end anonymous namespace.
2352
// Debug pretty-printer: render the operand according to its Kind tag.
// Note the deliberate fallthrough from k_Register into k_ShiftExtend when a
// register carries a non-trivial inline shift/extend.
2353void AArch64Operand::print(raw_ostream &OS) const {
2354 switch (Kind) {
2355 case k_FPImm:
2356 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2357 if (!getFPImmIsExact())
2358 OS << " (inexact)";
2359 OS << ">";
2360 break;
2361 case k_Barrier: {
2362 StringRef Name = getBarrierName();
2363 if (!Name.empty())
2364 OS << "<barrier " << Name << ">";
2365 else
2366 OS << "<barrier invalid #" << getBarrier() << ">";
2367 break;
2368 }
2369 case k_Immediate:
2370 OS << *getImm();
2371 break;
2372 case k_ShiftedImm: {
2373 unsigned Shift = getShiftedImmShift();
2374 OS << "<shiftedimm ";
2375 OS << *getShiftedImmVal();
2376 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2377 break;
2378 }
2379 case k_ImmRange: {
2380 OS << "<immrange ";
2381 OS << getFirstImmVal();
2382 OS << ":" << getLastImmVal() << ">";
2383 break;
2384 }
2385 case k_CondCode:
2386 OS << "<condcode " << getCondCode() << ">";
2387 break;
2388 case k_VectorList: {
2389 OS << "<vectorlist ";
2390 unsigned Reg = getVectorListStart();
2391 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2392 OS << Reg + i << " ";
2393 OS << ">";
2394 break;
2395 }
2396 case k_VectorIndex:
2397 OS << "<vectorindex " << getVectorIndex() << ">";
2398 break;
2399 case k_SysReg:
2400 OS << "<sysreg: " << getSysReg() << '>';
2401 break;
2402 case k_Token:
2403 OS << "'" << getToken() << "'";
2404 break;
2405 case k_SysCR:
2406 OS << "c" << getSysCR();
2407 break;
2408 case k_Prefetch: {
2409 StringRef Name = getPrefetchName();
2410 if (!Name.empty())
2411 OS << "<prfop " << Name << ">";
2412 else
2413 OS << "<prfop invalid #" << getPrefetch() << ">";
2414 break;
2415 }
2416 case k_PSBHint:
2417 OS << getPSBHintName();
2418 break;
2419 case k_BTIHint:
2420 OS << getBTIHintName();
2421 break;
2422 case k_MatrixRegister:
2423 OS << "<matrix " << getMatrixReg() << ">";
2424 break;
2425 case k_MatrixTileList: {
2426 OS << "<matrixlist ";
2427 unsigned RegMask = getMatrixTileListRegMask();
2428 unsigned MaxBits = 8;
// Print the low 8 mask bits MSB-first as a string of 0/1 digits.
2429 for (unsigned I = MaxBits; I > 0; --I)
2430 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2431 OS << '>';
2432 break;
2433 }
2434 case k_SVCR: {
2435 OS << getSVCR();
2436 break;
2437 }
2438 case k_Register:
2439 OS << "<register " << getReg() << ">";
2440 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2441 break;
2442 [[fallthrough]];
2443 case k_ShiftExtend:
2444 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2445 << getShiftExtendAmount();
2446 if (!hasShiftExtendAmount())
2447 OS << "<imp>";
2448 OS << '>';
2449 break;
2450 }
2451}
2452
2453/// @name Auto-generated Match Functions
2454/// {
2455
2456static unsigned MatchRegisterName(StringRef Name);
2457
2458/// }
2459
// Map a NEON vector register spelling "v0".."v31" (case-insensitive) to the
// corresponding Q-register enum value; returns 0 (no register) on no match.
2460static unsigned MatchNeonVectorRegName(StringRef Name) {
2461 return StringSwitch<unsigned>(Name.lower())
2462 .Case("v0", AArch64::Q0)
2463 .Case("v1", AArch64::Q1)
2464 .Case("v2", AArch64::Q2)
2465 .Case("v3", AArch64::Q3)
2466 .Case("v4", AArch64::Q4)
2467 .Case("v5", AArch64::Q5)
2468 .Case("v6", AArch64::Q6)
2469 .Case("v7", AArch64::Q7)
2470 .Case("v8", AArch64::Q8)
2471 .Case("v9", AArch64::Q9)
2472 .Case("v10", AArch64::Q10)
2473 .Case("v11", AArch64::Q11)
2474 .Case("v12", AArch64::Q12)
2475 .Case("v13", AArch64::Q13)
2476 .Case("v14", AArch64::Q14)
2477 .Case("v15", AArch64::Q15)
2478 .Case("v16", AArch64::Q16)
2479 .Case("v17", AArch64::Q17)
2480 .Case("v18", AArch64::Q18)
2481 .Case("v19", AArch64::Q19)
2482 .Case("v20", AArch64::Q20)
2483 .Case("v21", AArch64::Q21)
2484 .Case("v22", AArch64::Q22)
2485 .Case("v23", AArch64::Q23)
2486 .Case("v24", AArch64::Q24)
2487 .Case("v25", AArch64::Q25)
2488 .Case("v26", AArch64::Q26)
2489 .Case("v27", AArch64::Q27)
2490 .Case("v28", AArch64::Q28)
2491 .Case("v29", AArch64::Q29)
2492 .Case("v30", AArch64::Q30)
2493 .Case("v31", AArch64::Q31)
2494 .Default(0);
2495}
2496
2497/// Returns an optional pair of (#elements, element-width) if Suffix
2498/// is a valid vector kind. Where the number of elements in a vector
2499/// or the vector width is implicit or explicitly unknown (but still a
2500/// valid suffix kind), 0 is used.
2497/// Returns an optional pair of (#elements, element-width) if Suffix
2498/// is a valid vector kind. Where the number of elements in a vector
2499/// or the vector width is implicit or explicitly unknown (but still a
2500/// valid suffix kind), 0 is used.
// NEON accepts counted suffixes (".4s") plus width-neutral ones (".s");
// SVE/SME kinds accept only the width-only forms. {-1,-1} marks "invalid"
// and is converted to an empty Optional at the end.
2501static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2502 RegKind VectorKind) {
2503 std::pair<int, int> Res = {-1, -1};
2504
2505 switch (VectorKind) {
2506 case RegKind::NeonVector:
2507 Res =
2508 StringSwitch<std::pair<int, int>>(Suffix.lower())
2509 .Case("", {0, 0})
2510 .Case(".1d", {1, 64})
2511 .Case(".1q", {1, 128})
2512 // '.2h' needed for fp16 scalar pairwise reductions
2513 .Case(".2h", {2, 16})
2514 .Case(".2s", {2, 32})
2515 .Case(".2d", {2, 64})
2516 // '.4b' is another special case for the ARMv8.2a dot product
2517 // operand
2518 .Case(".4b", {4, 8})
2519 .Case(".4h", {4, 16})
2520 .Case(".4s", {4, 32})
2521 .Case(".8b", {8, 8})
2522 .Case(".8h", {8, 16})
2523 .Case(".16b", {16, 8})
2524 // Accept the width neutral ones, too, for verbose syntax. If those
2525 // aren't used in the right places, the token operand won't match so
2526 // all will work out.
2527 .Case(".b", {0, 8})
2528 .Case(".h", {0, 16})
2529 .Case(".s", {0, 32})
2530 .Case(".d", {0, 64})
2531 .Default({-1, -1});
2532 break;
2533 case RegKind::SVEPredicateAsCounter:
2534 case RegKind::SVEPredicateVector:
2535 case RegKind::SVEDataVector:
2536 case RegKind::Matrix:
2537 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2538 .Case("", {0, 0})
2539 .Case(".b", {0, 8})
2540 .Case(".h", {0, 16})
2541 .Case(".s", {0, 32})
2542 .Case(".d", {0, 64})
2543 .Case(".q", {0, 128})
2544 .Default({-1, -1});
2545 break;
2546 default:
2547 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2547)
;
2548 }
2549
2550 if (Res == std::make_pair(-1, -1))
2551 return Optional<std::pair<int, int>>();
2552
2553 return Optional<std::pair<int, int>>(Res);
2554}
2555
// Convenience predicate: true iff Suffix parses as a vector kind for
// VectorKind (see parseVectorKind for the accepted spellings).
2556static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2557 return parseVectorKind(Suffix, VectorKind).has_value();
2558}
2559
// Map an SVE data register spelling "z0".."z31" (case-insensitive) to its
// Z-register enum value; returns 0 on no match.
2560static unsigned matchSVEDataVectorRegName(StringRef Name) {
2561 return StringSwitch<unsigned>(Name.lower())
2562 .Case("z0", AArch64::Z0)
2563 .Case("z1", AArch64::Z1)
2564 .Case("z2", AArch64::Z2)
2565 .Case("z3", AArch64::Z3)
2566 .Case("z4", AArch64::Z4)
2567 .Case("z5", AArch64::Z5)
2568 .Case("z6", AArch64::Z6)
2569 .Case("z7", AArch64::Z7)
2570 .Case("z8", AArch64::Z8)
2571 .Case("z9", AArch64::Z9)
2572 .Case("z10", AArch64::Z10)
2573 .Case("z11", AArch64::Z11)
2574 .Case("z12", AArch64::Z12)
2575 .Case("z13", AArch64::Z13)
2576 .Case("z14", AArch64::Z14)
2577 .Case("z15", AArch64::Z15)
2578 .Case("z16", AArch64::Z16)
2579 .Case("z17", AArch64::Z17)
2580 .Case("z18", AArch64::Z18)
2581 .Case("z19", AArch64::Z19)
2582 .Case("z20", AArch64::Z20)
2583 .Case("z21", AArch64::Z21)
2584 .Case("z22", AArch64::Z22)
2585 .Case("z23", AArch64::Z23)
2586 .Case("z24", AArch64::Z24)
2587 .Case("z25", AArch64::Z25)
2588 .Case("z26", AArch64::Z26)
2589 .Case("z27", AArch64::Z27)
2590 .Case("z28", AArch64::Z28)
2591 .Case("z29", AArch64::Z29)
2592 .Case("z30", AArch64::Z30)
2593 .Case("z31", AArch64::Z31)
2594 .Default(0);
2595}
2596
// Map an SVE predicate register spelling "p0".."p15" (case-insensitive) to
// its P-register enum value; returns 0 on no match.
2597static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2598 return StringSwitch<unsigned>(Name.lower())
2599 .Case("p0", AArch64::P0)
2600 .Case("p1", AArch64::P1)
2601 .Case("p2", AArch64::P2)
2602 .Case("p3", AArch64::P3)
2603 .Case("p4", AArch64::P4)
2604 .Case("p5", AArch64::P5)
2605 .Case("p6", AArch64::P6)
2606 .Case("p7", AArch64::P7)
2607 .Case("p8", AArch64::P8)
2608 .Case("p9", AArch64::P9)
2609 .Case("p10", AArch64::P10)
2610 .Case("p11", AArch64::P11)
2611 .Case("p12", AArch64::P12)
2612 .Case("p13", AArch64::P13)
2613 .Case("p14", AArch64::P14)
2614 .Case("p15", AArch64::P15)
2615 .Default(0);
2616}
2617
// Map a predicate-as-counter spelling "pn0".."pn15" (SME2 syntax) to the
// same underlying P-register enum values; returns 0 on no match.
2618static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2619 return StringSwitch<unsigned>(Name.lower())
2620 .Case("pn0", AArch64::P0)
2621 .Case("pn1", AArch64::P1)
2622 .Case("pn2", AArch64::P2)
2623 .Case("pn3", AArch64::P3)
2624 .Case("pn4", AArch64::P4)
2625 .Case("pn5", AArch64::P5)
2626 .Case("pn6", AArch64::P6)
2627 .Case("pn7", AArch64::P7)
2628 .Case("pn8", AArch64::P8)
2629 .Case("pn9", AArch64::P9)
2630 .Case("pn10", AArch64::P10)
2631 .Case("pn11", AArch64::P11)
2632 .Case("pn12", AArch64::P12)
2633 .Case("pn13", AArch64::P13)
2634 .Case("pn14", AArch64::P14)
2635 .Case("pn15", AArch64::P15)
2636 .Default(0);
2637}
2638
// Map a ZA tile spelling usable inside a tile list ("zaN.d/.s/.h/.b") to its
// tile register enum value; returns 0 on no match. Note .q tiles are not
// valid in tile lists (contrast with matchMatrixRegName below).
2639static unsigned matchMatrixTileListRegName(StringRef Name) {
2640 return StringSwitch<unsigned>(Name.lower())
2641 .Case("za0.d", AArch64::ZAD0)
2642 .Case("za1.d", AArch64::ZAD1)
2643 .Case("za2.d", AArch64::ZAD2)
2644 .Case("za3.d", AArch64::ZAD3)
2645 .Case("za4.d", AArch64::ZAD4)
2646 .Case("za5.d", AArch64::ZAD5)
2647 .Case("za6.d", AArch64::ZAD6)
2648 .Case("za7.d", AArch64::ZAD7)
2649 .Case("za0.s", AArch64::ZAS0)
2650 .Case("za1.s", AArch64::ZAS1)
2651 .Case("za2.s", AArch64::ZAS2)
2652 .Case("za3.s", AArch64::ZAS3)
2653 .Case("za0.h", AArch64::ZAH0)
2654 .Case("za1.h", AArch64::ZAH1)
2655 .Case("za0.b", AArch64::ZAB0)
2656 .Default(0);
2657}
2658
// Map any SME matrix register spelling to its enum value: the whole array
// "za", plain tiles "zaN.<t>", and horizontal/vertical slice forms
// "zaNh.<t>" / "zaNv.<t>" (h/v map to the same tile register; direction is
// encoded elsewhere). Returns 0 on no match.
2659static unsigned matchMatrixRegName(StringRef Name) {
2660 return StringSwitch<unsigned>(Name.lower())
2661 .Case("za", AArch64::ZA)
2662 .Case("za0.q", AArch64::ZAQ0)
2663 .Case("za1.q", AArch64::ZAQ1)
2664 .Case("za2.q", AArch64::ZAQ2)
2665 .Case("za3.q", AArch64::ZAQ3)
2666 .Case("za4.q", AArch64::ZAQ4)
2667 .Case("za5.q", AArch64::ZAQ5)
2668 .Case("za6.q", AArch64::ZAQ6)
2669 .Case("za7.q", AArch64::ZAQ7)
2670 .Case("za8.q", AArch64::ZAQ8)
2671 .Case("za9.q", AArch64::ZAQ9)
2672 .Case("za10.q", AArch64::ZAQ10)
2673 .Case("za11.q", AArch64::ZAQ11)
2674 .Case("za12.q", AArch64::ZAQ12)
2675 .Case("za13.q", AArch64::ZAQ13)
2676 .Case("za14.q", AArch64::ZAQ14)
2677 .Case("za15.q", AArch64::ZAQ15)
2678 .Case("za0.d", AArch64::ZAD0)
2679 .Case("za1.d", AArch64::ZAD1)
2680 .Case("za2.d", AArch64::ZAD2)
2681 .Case("za3.d", AArch64::ZAD3)
2682 .Case("za4.d", AArch64::ZAD4)
2683 .Case("za5.d", AArch64::ZAD5)
2684 .Case("za6.d", AArch64::ZAD6)
2685 .Case("za7.d", AArch64::ZAD7)
2686 .Case("za0.s", AArch64::ZAS0)
2687 .Case("za1.s", AArch64::ZAS1)
2688 .Case("za2.s", AArch64::ZAS2)
2689 .Case("za3.s", AArch64::ZAS3)
2690 .Case("za0.h", AArch64::ZAH0)
2691 .Case("za1.h", AArch64::ZAH1)
2692 .Case("za0.b", AArch64::ZAB0)
2693 .Case("za0h.q", AArch64::ZAQ0)
2694 .Case("za1h.q", AArch64::ZAQ1)
2695 .Case("za2h.q", AArch64::ZAQ2)
2696 .Case("za3h.q", AArch64::ZAQ3)
2697 .Case("za4h.q", AArch64::ZAQ4)
2698 .Case("za5h.q", AArch64::ZAQ5)
2699 .Case("za6h.q", AArch64::ZAQ6)
2700 .Case("za7h.q", AArch64::ZAQ7)
2701 .Case("za8h.q", AArch64::ZAQ8)
2702 .Case("za9h.q", AArch64::ZAQ9)
2703 .Case("za10h.q", AArch64::ZAQ10)
2704 .Case("za11h.q", AArch64::ZAQ11)
2705 .Case("za12h.q", AArch64::ZAQ12)
2706 .Case("za13h.q", AArch64::ZAQ13)
2707 .Case("za14h.q", AArch64::ZAQ14)
2708 .Case("za15h.q", AArch64::ZAQ15)
2709 .Case("za0h.d", AArch64::ZAD0)
2710 .Case("za1h.d", AArch64::ZAD1)
2711 .Case("za2h.d", AArch64::ZAD2)
2712 .Case("za3h.d", AArch64::ZAD3)
2713 .Case("za4h.d", AArch64::ZAD4)
2714 .Case("za5h.d", AArch64::ZAD5)
2715 .Case("za6h.d", AArch64::ZAD6)
2716 .Case("za7h.d", AArch64::ZAD7)
2717 .Case("za0h.s", AArch64::ZAS0)
2718 .Case("za1h.s", AArch64::ZAS1)
2719 .Case("za2h.s", AArch64::ZAS2)
2720 .Case("za3h.s", AArch64::ZAS3)
2721 .Case("za0h.h", AArch64::ZAH0)
2722 .Case("za1h.h", AArch64::ZAH1)
2723 .Case("za0h.b", AArch64::ZAB0)
2724 .Case("za0v.q", AArch64::ZAQ0)
2725 .Case("za1v.q", AArch64::ZAQ1)
2726 .Case("za2v.q", AArch64::ZAQ2)
2727 .Case("za3v.q", AArch64::ZAQ3)
2728 .Case("za4v.q", AArch64::ZAQ4)
2729 .Case("za5v.q", AArch64::ZAQ5)
2730 .Case("za6v.q", AArch64::ZAQ6)
2731 .Case("za7v.q", AArch64::ZAQ7)
2732 .Case("za8v.q", AArch64::ZAQ8)
2733 .Case("za9v.q", AArch64::ZAQ9)
2734 .Case("za10v.q", AArch64::ZAQ10)
2735 .Case("za11v.q", AArch64::ZAQ11)
2736 .Case("za12v.q", AArch64::ZAQ12)
2737 .Case("za13v.q", AArch64::ZAQ13)
2738 .Case("za14v.q", AArch64::ZAQ14)
2739 .Case("za15v.q", AArch64::ZAQ15)
2740 .Case("za0v.d", AArch64::ZAD0)
2741 .Case("za1v.d", AArch64::ZAD1)
2742 .Case("za2v.d", AArch64::ZAD2)
2743 .Case("za3v.d", AArch64::ZAD3)
2744 .Case("za4v.d", AArch64::ZAD4)
2745 .Case("za5v.d", AArch64::ZAD5)
2746 .Case("za6v.d", AArch64::ZAD6)
2747 .Case("za7v.d", AArch64::ZAD7)
2748 .Case("za0v.s", AArch64::ZAS0)
2749 .Case("za1v.s", AArch64::ZAS1)
2750 .Case("za2v.s", AArch64::ZAS2)
2751 .Case("za3v.s", AArch64::ZAS3)
2752 .Case("za0v.h", AArch64::ZAH0)
2753 .Case("za1v.h", AArch64::ZAH1)
2754 .Case("za0v.b", AArch64::ZAB0)
2755 .Default(0);
2756}
2757
// MCTargetAsmParser hook. Returns true on *failure* (the MC convention),
// i.e. when tryParseRegister did not succeed.
2758bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2759 SMLoc &EndLoc) {
2760 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2761}
2762
// Attempt to parse a scalar register, filling in StartLoc/EndLoc regardless
// of outcome. EndLoc points at the last consumed character (lexer position
// minus one).
2763OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2764 SMLoc &StartLoc,
2765 SMLoc &EndLoc) {
2766 StartLoc = getLoc();
2767 auto Res = tryParseScalarRegister(RegNo);
2768 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2769 return Res;
2770}
2771
2772// Matches a register name or register alias previously defined by '.req'
// Resolution order: SVE data, SVE predicate, predicate-as-counter, NEON,
// matrix, ZT0 lookup table, then scalar names, then common scalar aliases
// (fp/lr/x31/w31), and finally user-defined '.req' aliases. Each matcher
// returns the register only when it matches the *requested* Kind, so a name
// of the wrong kind yields 0 rather than falling through to later matchers.
2773unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2774 RegKind Kind) {
2775 unsigned RegNum = 0;
2776 if ((RegNum = matchSVEDataVectorRegName(Name)))
2777 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2778
2779 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2780 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2781
2782 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2783 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2784
2785 if ((RegNum = MatchNeonVectorRegName(Name)))
2786 return Kind == RegKind::NeonVector ? RegNum : 0;
2787
2788 if ((RegNum = matchMatrixRegName(Name)))
2789 return Kind == RegKind::Matrix ? RegNum : 0;
2790
2791 if (Name.equals_insensitive("zt0"))
2792 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2793
2794 // The parsed register must be of RegKind Scalar
2795 if ((RegNum = MatchRegisterName(Name)))
2796 return (Kind == RegKind::Scalar) ? RegNum : 0;
2797
// NOTE(review): RegNum is necessarily 0 here (the previous 'if' returned on
// any nonzero match), so this condition is always true; kept as-is.
2798 if (!RegNum) {
2799 // Handle a few common aliases of registers.
2800 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2801 .Case("fp", AArch64::FP)
2802 .Case("lr", AArch64::LR)
2803 .Case("x31", AArch64::XZR)
2804 .Case("w31", AArch64::WZR)
2805 .Default(0))
2806 return Kind == RegKind::Scalar ? RegNum : 0;
2807
2808 // Check for aliases registered via .req. Canonicalize to lower case.
2809 // That's more consistent since register names are case insensitive, and
2810 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2811 auto Entry = RegisterReqs.find(Name.lower());
2812 if (Entry == RegisterReqs.end())
2813 return 0;
2814
2815 // set RegNum if the match is the right kind of register
2816 if (Kind == Entry->getValue().first)
2817 RegNum = Entry->getValue().second;
2818 }
2819 return RegNum;
2820}
2821
// Number of architectural registers in each register class: 32 scalar/NEON/
// SVE-data, 16 predicates and matrix tiles, 512 for the ZT0 lookup table.
2822unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2823 switch (K) {
2824 case RegKind::Scalar:
2825 case RegKind::NeonVector:
2826 case RegKind::SVEDataVector:
2827 return 32;
2828 case RegKind::Matrix:
2829 case RegKind::SVEPredicateVector:
2830 case RegKind::SVEPredicateAsCounter:
2831 return 16;
2832 case RegKind::LookupTable:
2833 return 512;
2834 }
2835 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2835)
;
2836}
2837
2838/// tryParseScalarRegister - Try to parse a register name. The token must be an
2839/// Identifier when called, and if it is a register name the token is eaten and
2840/// the register is added to the operand list.
2841OperandMatchResultTy
2842AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2843 const AsmToken &Tok = getTok();
2844 if (Tok.isNot(AsmToken::Identifier))
2845 return MatchOperand_NoMatch;
2846
2847 std::string lowerCase = Tok.getString().lower();
2848 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2849 if (Reg == 0)
2850 return MatchOperand_NoMatch;
2851
2852 RegNum = Reg;
2853 Lex(); // Eat identifier token.
2854 return MatchOperand_Success;
2855}
2856
2857/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2858OperandMatchResultTy
2859AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2860 SMLoc S = getLoc();
2861
2862 if (getTok().isNot(AsmToken::Identifier)) {
2863 Error(S, "Expected cN operand where 0 <= N <= 15");
2864 return MatchOperand_ParseFail;
2865 }
2866
2867 StringRef Tok = getTok().getIdentifier();
2868 if (Tok[0] != 'c' && Tok[0] != 'C') {
2869 Error(S, "Expected cN operand where 0 <= N <= 15");
2870 return MatchOperand_ParseFail;
2871 }
2872
2873 uint32_t CRNum;
2874 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2875 if (BadNum || CRNum > 15) {
2876 Error(S, "Expected cN operand where 0 <= N <= 15");
2877 return MatchOperand_ParseFail;
2878 }
2879
2880 Lex(); // Eat identifier token.
2881 Operands.push_back(
2882 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2883 return MatchOperand_Success;
2884}
2885
2886/// tryParsePrefetch - Try to parse a prefetch operand.
2887template <bool IsSVEPrefetch>
2888OperandMatchResultTy
2889AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2890 SMLoc S = getLoc();
2891 const AsmToken &Tok = getTok();
2892
2893 auto LookupByName = [](StringRef N) {
2894 if (IsSVEPrefetch) {
2895 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2896 return Optional<unsigned>(Res->Encoding);
2897 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2898 return Optional<unsigned>(Res->Encoding);
2899 return Optional<unsigned>();
2900 };
2901
2902 auto LookupByEncoding = [](unsigned E) {
2903 if (IsSVEPrefetch) {
2904 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2905 return Optional<StringRef>(Res->Name);
2906 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2907 return Optional<StringRef>(Res->Name);
2908 return Optional<StringRef>();
2909 };
2910 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2911
2912 // Either an identifier for named values or a 5-bit immediate.
2913 // Eat optional hash.
2914 if (parseOptionalToken(AsmToken::Hash) ||
2915 Tok.is(AsmToken::Integer)) {
2916 const MCExpr *ImmVal;
2917 if (getParser().parseExpression(ImmVal))
2918 return MatchOperand_ParseFail;
2919
2920 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2921 if (!MCE) {
2922 TokError("immediate value expected for prefetch operand");
2923 return MatchOperand_ParseFail;
2924 }
2925 unsigned prfop = MCE->getValue();
2926 if (prfop > MaxVal) {
2927 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2928 "] expected");
2929 return MatchOperand_ParseFail;
2930 }
2931
2932 auto PRFM = LookupByEncoding(MCE->getValue());
2933 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
2934 S, getContext()));
2935 return MatchOperand_Success;
2936 }
2937
2938 if (Tok.isNot(AsmToken::Identifier)) {
2939 TokError("prefetch hint expected");
2940 return MatchOperand_ParseFail;
2941 }
2942
2943 auto PRFM = LookupByName(Tok.getString());
2944 if (!PRFM) {
2945 TokError("prefetch hint expected");
2946 return MatchOperand_ParseFail;
2947 }
2948
2949 Operands.push_back(AArch64Operand::CreatePrefetch(
2950 *PRFM, Tok.getString(), S, getContext()));
2951 Lex(); // Eat identifier token.
2952 return MatchOperand_Success;
2953}
2954
2955/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2956OperandMatchResultTy
2957AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2958 SMLoc S = getLoc();
2959 const AsmToken &Tok = getTok();
2960 if (Tok.isNot(AsmToken::Identifier)) {
2961 TokError("invalid operand for instruction");
2962 return MatchOperand_ParseFail;
2963 }
2964
2965 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2966 if (!PSB) {
2967 TokError("invalid operand for instruction");
2968 return MatchOperand_ParseFail;
2969 }
2970
2971 Operands.push_back(AArch64Operand::CreatePSBHint(
2972 PSB->Encoding, Tok.getString(), S, getContext()));
2973 Lex(); // Eat identifier token.
2974 return MatchOperand_Success;
2975}
2976
2977/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2978OperandMatchResultTy
2979AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2980 SMLoc S = getLoc();
2981 const AsmToken &Tok = getTok();
2982 if (Tok.isNot(AsmToken::Identifier)) {
2983 TokError("invalid operand for instruction");
2984 return MatchOperand_ParseFail;
2985 }
2986
2987 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2988 if (!BTI) {
2989 TokError("invalid operand for instruction");
2990 return MatchOperand_ParseFail;
2991 }
2992
2993 Operands.push_back(AArch64Operand::CreateBTIHint(
2994 BTI->Encoding, Tok.getString(), S, getContext()));
2995 Lex(); // Eat identifier token.
2996 return MatchOperand_Success;
2997}
2998
2999/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3000/// instruction.
3001OperandMatchResultTy
3002AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3003 SMLoc S = getLoc();
3004 const MCExpr *Expr = nullptr;
3005
3006 if (getTok().is(AsmToken::Hash)) {
3007 Lex(); // Eat hash token.
3008 }
3009
3010 if (parseSymbolicImmVal(Expr))
3011 return MatchOperand_ParseFail;
3012
3013 AArch64MCExpr::VariantKind ELFRefKind;
3014 MCSymbolRefExpr::VariantKind DarwinRefKind;
3015 int64_t Addend;
3016 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3017 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3018 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3019 // No modifier was specified at all; this is the syntax for an ELF basic
3020 // ADRP relocation (unfortunately).
3021 Expr =
3022 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3023 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3024 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3025 Addend != 0) {
3026 Error(S, "gotpage label reference not allowed an addend");
3027 return MatchOperand_ParseFail;
3028 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3029 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3030 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3031 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3032 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3033 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3034 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3035 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3036 // The operand must be an @page or @gotpage qualified symbolref.
3037 Error(S, "page or gotpage label reference expected");
3038 return MatchOperand_ParseFail;
3039 }
3040 }
3041
3042 // We have either a label reference possibly with addend or an immediate. The
3043 // addend is a raw value here. The linker will adjust it to only reference the
3044 // page.
3045 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3046 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3047
3048 return MatchOperand_Success;
3049}
3050
3051/// tryParseAdrLabel - Parse and validate a source label for the ADR
3052/// instruction.
3053OperandMatchResultTy
3054AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3055 SMLoc S = getLoc();
3056 const MCExpr *Expr = nullptr;
3057
3058 // Leave anything with a bracket to the default for SVE
3059 if (getTok().is(AsmToken::LBrac))
3060 return MatchOperand_NoMatch;
3061
3062 if (getTok().is(AsmToken::Hash))
3063 Lex(); // Eat hash token.
3064
3065 if (parseSymbolicImmVal(Expr))
3066 return MatchOperand_ParseFail;
3067
3068 AArch64MCExpr::VariantKind ELFRefKind;
3069 MCSymbolRefExpr::VariantKind DarwinRefKind;
3070 int64_t Addend;
3071 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3072 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3073 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3074 // No modifier was specified at all; this is the syntax for an ELF basic
3075 // ADR relocation (unfortunately).
3076 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3077 } else {
3078 Error(S, "unexpected adr label");
3079 return MatchOperand_ParseFail;
3080 }
3081 }
3082
3083 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3084 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3085 return MatchOperand_Success;
3086}
3087
3088/// tryParseFPImm - A floating point immediate expression operand.
3089template<bool AddFPZeroAsLiteral>
3090OperandMatchResultTy
3091AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3092 SMLoc S = getLoc();
3093
3094 bool Hash = parseOptionalToken(AsmToken::Hash);
3095
3096 // Handle negation, as that still comes through as a separate token.
3097 bool isNegative = parseOptionalToken(AsmToken::Minus);
3098
3099 const AsmToken &Tok = getTok();
3100 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3101 if (!Hash)
3102 return MatchOperand_NoMatch;
3103 TokError("invalid floating point immediate");
3104 return MatchOperand_ParseFail;
3105 }
3106
3107 // Parse hexadecimal representation.
3108 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
3109 if (Tok.getIntVal() > 255 || isNegative) {
3110 TokError("encoded floating point value out of range");
3111 return MatchOperand_ParseFail;
3112 }
3113
3114 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3115 Operands.push_back(
3116 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3117 } else {
3118 // Parse FP representation.
3119 APFloat RealVal(APFloat::IEEEdouble());
3120 auto StatusOrErr =
3121 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3122 if (errorToBool(StatusOrErr.takeError())) {
3123 TokError("invalid floating point representation");
3124 return MatchOperand_ParseFail;
3125 }
3126
3127 if (isNegative)
3128 RealVal.changeSign();
3129
3130 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3131 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3132 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3133 } else
3134 Operands.push_back(AArch64Operand::CreateFPImm(
3135 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3136 }
3137
3138 Lex(); // Eat the token.
3139
3140 return MatchOperand_Success;
3141}
3142
3143/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3144/// a shift suffix, for example '#1, lsl #12'.
3145OperandMatchResultTy
3146AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3147 SMLoc S = getLoc();
3148
3149 if (getTok().is(AsmToken::Hash))
1
Taking true branch
3150 Lex(); // Eat '#'
3151 else if (getTok().isNot(AsmToken::Integer))
3152 // Operand should start from # or should be integer, emit error otherwise.
3153 return MatchOperand_NoMatch;
3154
3155 if (getTok().is(AsmToken::Integer) &&
2
Taking true branch
3156 getLexer().peekTok().is(AsmToken::Colon))
3157 return tryParseImmRange(Operands);
3
Calling 'AArch64AsmParser::tryParseImmRange'
3158
3159 const MCExpr *Imm = nullptr;
3160 if (parseSymbolicImmVal(Imm))
3161 return MatchOperand_ParseFail;
3162 else if (getTok().isNot(AsmToken::Comma)) {
3163 Operands.push_back(
3164 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3165 return MatchOperand_Success;
3166 }
3167
3168 // Eat ','
3169 Lex();
3170 StringRef VecGroup;
3171 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3172 Operands.push_back(
3173 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3174 Operands.push_back(
3175 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3176 return MatchOperand_Success;
3177 }
3178
3179 // The optional operand must be "lsl #N" where N is non-negative.
3180 if (!getTok().is(AsmToken::Identifier) ||
3181 !getTok().getIdentifier().equals_insensitive("lsl")) {
3182 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3183 return MatchOperand_ParseFail;
3184 }
3185
3186 // Eat 'lsl'
3187 Lex();
3188
3189 parseOptionalToken(AsmToken::Hash);
3190
3191 if (getTok().isNot(AsmToken::Integer)) {
3192 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3193 return MatchOperand_ParseFail;
3194 }
3195
3196 int64_t ShiftAmount = getTok().getIntVal();
3197
3198 if (ShiftAmount < 0) {
3199 Error(getLoc(), "positive shift amount required");
3200 return MatchOperand_ParseFail;
3201 }
3202 Lex(); // Eat the number
3203
3204 // Just in case the optional lsl #0 is used for immediates other than zero.
3205 if (ShiftAmount == 0 && Imm != nullptr) {
3206 Operands.push_back(
3207 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3208 return MatchOperand_Success;
3209 }
3210
3211 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3212 getLoc(), getContext()));
3213 return MatchOperand_Success;
3214}
3215
3216/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3217/// suggestion to help common typos.
3218AArch64CC::CondCode
3219AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3220 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3221 .Case("eq", AArch64CC::EQ)
3222 .Case("ne", AArch64CC::NE)
3223 .Case("cs", AArch64CC::HS)
3224 .Case("hs", AArch64CC::HS)
3225 .Case("cc", AArch64CC::LO)
3226 .Case("lo", AArch64CC::LO)
3227 .Case("mi", AArch64CC::MI)
3228 .Case("pl", AArch64CC::PL)
3229 .Case("vs", AArch64CC::VS)
3230 .Case("vc", AArch64CC::VC)
3231 .Case("hi", AArch64CC::HI)
3232 .Case("ls", AArch64CC::LS)
3233 .Case("ge", AArch64CC::GE)
3234 .Case("lt", AArch64CC::LT)
3235 .Case("gt", AArch64CC::GT)
3236 .Case("le", AArch64CC::LE)
3237 .Case("al", AArch64CC::AL)
3238 .Case("nv", AArch64CC::NV)
3239 .Default(AArch64CC::Invalid);
3240
3241 if (CC == AArch64CC::Invalid &&
3242 getSTI().getFeatureBits()[AArch64::FeatureSVE]) {
3243 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3244 .Case("none", AArch64CC::EQ)
3245 .Case("any", AArch64CC::NE)
3246 .Case("nlast", AArch64CC::HS)
3247 .Case("last", AArch64CC::LO)
3248 .Case("first", AArch64CC::MI)
3249 .Case("nfrst", AArch64CC::PL)
3250 .Case("pmore", AArch64CC::HI)
3251 .Case("plast", AArch64CC::LS)
3252 .Case("tcont", AArch64CC::GE)
3253 .Case("tstop", AArch64CC::LT)
3254 .Default(AArch64CC::Invalid);
3255
3256 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3257 Suggestion = "nfrst";
3258 }
3259 return CC;
3260}
3261
3262/// parseCondCode - Parse a Condition Code operand.
3263bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3264 bool invertCondCode) {
3265 SMLoc S = getLoc();
3266 const AsmToken &Tok = getTok();
3267 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")(static_cast <bool> (Tok.is(AsmToken::Identifier) &&
"Token is not an Identifier") ? void (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3267
, __extension__ __PRETTY_FUNCTION__))
;
3268
3269 StringRef Cond = Tok.getString();
3270 std::string Suggestion;
3271 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3272 if (CC == AArch64CC::Invalid) {
3273 std::string Msg = "invalid condition code";
3274 if (!Suggestion.empty())
3275 Msg += ", did you mean " + Suggestion + "?";
3276 return TokError(Msg);
3277 }
3278 Lex(); // Eat identifier token.
3279
3280 if (invertCondCode) {
3281 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3282 return TokError("condition codes AL and NV are invalid for this instruction");
3283 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3284 }
3285
3286 Operands.push_back(
3287 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3288 return false;
3289}
3290
3291OperandMatchResultTy
3292AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3293 const AsmToken &Tok = getTok();
3294 SMLoc S = getLoc();
3295
3296 if (Tok.isNot(AsmToken::Identifier)) {
3297 TokError("invalid operand for instruction");
3298 return MatchOperand_ParseFail;
3299 }
3300
3301 unsigned PStateImm = -1;
3302 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3303 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3304 PStateImm = SVCR->Encoding;
3305
3306 Operands.push_back(
3307 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3308 Lex(); // Eat identifier token.
3309 return MatchOperand_Success;
3310}
3311
3312OperandMatchResultTy
3313AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3314 const AsmToken &Tok = getTok();
3315 SMLoc S = getLoc();
3316
3317 StringRef Name = Tok.getString();
3318
3319 if (Name.equals_insensitive("za") || Name.startswith_insensitive("za.")) {
3320 Lex(); // eat "za[.(b|h|s|d)]"
3321 unsigned ElementWidth = 0;
3322 auto DotPosition = Name.find('.');
3323 if (DotPosition != StringRef::npos) {
3324 const auto &KindRes =
3325 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3326 if (!KindRes) {
3327 TokError(
3328 "Expected the register to be followed by element width suffix");
3329 return MatchOperand_ParseFail;
3330 }
3331 ElementWidth = KindRes->second;
3332 }
3333 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3334 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3335 getContext()));
3336 if (getLexer().is(AsmToken::LBrac)) {
3337 // There's no comma after matrix operand, so we can parse the next operand
3338 // immediately.
3339 if (parseOperand(Operands, false, false))
3340 return MatchOperand_NoMatch;
3341 }
3342 return MatchOperand_Success;
3343 }
3344
3345 // Try to parse matrix register.
3346 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3347 if (!Reg)
3348 return MatchOperand_NoMatch;
3349
3350 size_t DotPosition = Name.find('.');
3351 assert(DotPosition != StringRef::npos && "Unexpected register")(static_cast <bool> (DotPosition != StringRef::npos &&
"Unexpected register") ? void (0) : __assert_fail ("DotPosition != StringRef::npos && \"Unexpected register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3351
, __extension__ __PRETTY_FUNCTION__))
;
3352
3353 StringRef Head = Name.take_front(DotPosition);
3354 StringRef Tail = Name.drop_front(DotPosition);
3355 StringRef RowOrColumn = Head.take_back();
3356
3357 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
3358 .Case("h", MatrixKind::Row)
3359 .Case("v", MatrixKind::Col)
3360 .Default(MatrixKind::Tile);
3361
3362 // Next up, parsing the suffix
3363 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3364 if (!KindRes) {
3365 TokError("Expected the register to be followed by element width suffix");
3366 return MatchOperand_ParseFail;
3367 }
3368 unsigned ElementWidth = KindRes->second;
3369
3370 Lex();
3371
3372 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3373 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3374
3375 if (getLexer().is(AsmToken::LBrac)) {
3376 // There's no comma after matrix operand, so we can parse the next operand
3377 // immediately.
3378 if (parseOperand(Operands, false, false))
3379 return MatchOperand_NoMatch;
3380 }
3381 return MatchOperand_Success;
3382}
3383
3384/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3385/// them if present.
3386OperandMatchResultTy
3387AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3388 const AsmToken &Tok = getTok();
3389 std::string LowerID = Tok.getString().lower();
3390 AArch64_AM::ShiftExtendType ShOp =
3391 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3392 .Case("lsl", AArch64_AM::LSL)
3393 .Case("lsr", AArch64_AM::LSR)
3394 .Case("asr", AArch64_AM::ASR)
3395 .Case("ror", AArch64_AM::ROR)
3396 .Case("msl", AArch64_AM::MSL)
3397 .Case("uxtb", AArch64_AM::UXTB)
3398 .Case("uxth", AArch64_AM::UXTH)
3399 .Case("uxtw", AArch64_AM::UXTW)
3400 .Case("uxtx", AArch64_AM::UXTX)
3401 .Case("sxtb", AArch64_AM::SXTB)
3402 .Case("sxth", AArch64_AM::SXTH)
3403 .Case("sxtw", AArch64_AM::SXTW)
3404 .Case("sxtx", AArch64_AM::SXTX)
3405 .Default(AArch64_AM::InvalidShiftExtend);
3406
3407 if (ShOp == AArch64_AM::InvalidShiftExtend)
3408 return MatchOperand_NoMatch;
3409
3410 SMLoc S = Tok.getLoc();
3411 Lex();
3412
3413 bool Hash = parseOptionalToken(AsmToken::Hash);
3414
3415 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3416 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3417 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3418 ShOp == AArch64_AM::MSL) {
3419 // We expect a number here.
3420 TokError("expected #imm after shift specifier");
3421 return MatchOperand_ParseFail;
3422 }
3423
3424 // "extend" type operations don't need an immediate, #0 is implicit.
3425 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3426 Operands.push_back(
3427 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3428 return MatchOperand_Success;
3429 }
3430
3431 // Make sure we do actually have a number, identifier or a parenthesized
3432 // expression.
3433 SMLoc E = getLoc();
3434 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3435 !getTok().is(AsmToken::Identifier)) {
3436 Error(E, "expected integer shift amount");
3437 return MatchOperand_ParseFail;
3438 }
3439
3440 const MCExpr *ImmVal;
3441 if (getParser().parseExpression(ImmVal))
3442 return MatchOperand_ParseFail;
3443
3444 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3445 if (!MCE) {
3446 Error(E, "expected constant '#imm' after shift specifier");
3447 return MatchOperand_ParseFail;
3448 }
3449
3450 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3451 Operands.push_back(AArch64Operand::CreateShiftExtend(
3452 ShOp, MCE->getValue(), true, S, E, getContext()));
3453 return MatchOperand_Success;
3454}
3455
3456static const struct Extension {
3457 const char *Name;
3458 const FeatureBitset Features;
3459} ExtensionMap[] = {
3460 {"crc", {AArch64::FeatureCRC}},
3461 {"sm4", {AArch64::FeatureSM4}},
3462 {"sha3", {AArch64::FeatureSHA3}},
3463 {"sha2", {AArch64::FeatureSHA2}},
3464 {"aes", {AArch64::FeatureAES}},
3465 {"crypto", {AArch64::FeatureCrypto}},
3466 {"fp", {AArch64::FeatureFPARMv8}},
3467 {"simd", {AArch64::FeatureNEON}},
3468 {"ras", {AArch64::FeatureRAS}},
3469 {"lse", {AArch64::FeatureLSE}},
3470 {"predres", {AArch64::FeaturePredRes}},
3471 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3472 {"mte", {AArch64::FeatureMTE}},
3473 {"memtag", {AArch64::FeatureMTE}},
3474 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3475 {"pan", {AArch64::FeaturePAN}},
3476 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3477 {"ccpp", {AArch64::FeatureCCPP}},
3478 {"rcpc", {AArch64::FeatureRCPC}},
3479 {"rng", {AArch64::FeatureRandGen}},
3480 {"sve", {AArch64::FeatureSVE}},
3481 {"sve2", {AArch64::FeatureSVE2}},
3482 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3483 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3484 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3485 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3486 {"sve2p1", {AArch64::FeatureSVE2p1}},
3487 {"b16b16", {AArch64::FeatureB16B16}},
3488 {"ls64", {AArch64::FeatureLS64}},
3489 {"xs", {AArch64::FeatureXS}},
3490 {"pauth", {AArch64::FeaturePAuth}},
3491 {"flagm", {AArch64::FeatureFlagM}},
3492 {"rme", {AArch64::FeatureRME}},
3493 {"sme", {AArch64::FeatureSME}},
3494 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3495 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3496 {"sme2", {AArch64::FeatureSME2}},
3497 {"sme2p1", {AArch64::FeatureSME2p1}},
3498 {"hbc", {AArch64::FeatureHBC}},
3499 {"mops", {AArch64::FeatureMOPS}},
3500 // FIXME: Unsupported extensions
3501 {"lor", {}},
3502 {"rdma", {}},
3503 {"profile", {}},
3504};
3505
3506static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3507 if (FBS[AArch64::HasV8_0aOps])
3508 Str += "ARMv8a";
3509 if (FBS[AArch64::HasV8_1aOps])
3510 Str += "ARMv8.1a";
3511 else if (FBS[AArch64::HasV8_2aOps])
3512 Str += "ARMv8.2a";
3513 else if (FBS[AArch64::HasV8_3aOps])
3514 Str += "ARMv8.3a";
3515 else if (FBS[AArch64::HasV8_4aOps])
3516 Str += "ARMv8.4a";
3517 else if (FBS[AArch64::HasV8_5aOps])
3518 Str += "ARMv8.5a";
3519 else if (FBS[AArch64::HasV8_6aOps])
3520 Str += "ARMv8.6a";
3521 else if (FBS[AArch64::HasV8_7aOps])
3522 Str += "ARMv8.7a";
3523 else if (FBS[AArch64::HasV8_8aOps])
3524 Str += "ARMv8.8a";
3525 else if (FBS[AArch64::HasV9_0aOps])
3526 Str += "ARMv9-a";
3527 else if (FBS[AArch64::HasV9_1aOps])
3528 Str += "ARMv9.1a";
3529 else if (FBS[AArch64::HasV9_2aOps])
3530 Str += "ARMv9.2a";
3531 else if (FBS[AArch64::HasV9_3aOps])
3532 Str += "ARMv9.3a";
3533 else if (FBS[AArch64::HasV8_0rOps])
3534 Str += "ARMv8r";
3535 else {
3536 SmallVector<std::string, 2> ExtMatches;
3537 for (const auto& Ext : ExtensionMap) {
3538 // Use & in case multiple features are enabled
3539 if ((FBS & Ext.Features) != FeatureBitset())
3540 ExtMatches.push_back(Ext.Name);
3541 }
3542 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3543 }
3544}
3545
3546void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3547 SMLoc S) {
3548 const uint16_t Op2 = Encoding & 7;
3549 const uint16_t Cm = (Encoding & 0x78) >> 3;
3550 const uint16_t Cn = (Encoding & 0x780) >> 7;
3551 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3552
3553 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3554
3555 Operands.push_back(
3556 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3557 Operands.push_back(
3558 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3559 Operands.push_back(
3560 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3561 Expr = MCConstantExpr::create(Op2, getContext());
3562 Operands.push_back(
3563 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3564}
3565
3566/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3567/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3568bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3569 OperandVector &Operands) {
3570 if (Name.contains('.'))
3571 return TokError("invalid operand");
3572
3573 Mnemonic = Name;
3574 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3575
3576 const AsmToken &Tok = getTok();
3577 StringRef Op = Tok.getString();
3578 SMLoc S = Tok.getLoc();
3579
3580 if (Mnemonic == "ic") {
3581 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3582 if (!IC)
3583 return TokError("invalid operand for IC instruction");
3584 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3585 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3586 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3587 return TokError(Str);
3588 }
3589 createSysAlias(IC->Encoding, Operands, S);
3590 } else if (Mnemonic == "dc") {
3591 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3592 if (!DC)
3593 return TokError("invalid operand for DC instruction");
3594 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3595 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3596 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3597 return TokError(Str);
3598 }
3599 createSysAlias(DC->Encoding, Operands, S);
3600 } else if (Mnemonic == "at") {
3601 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3602 if (!AT)
3603 return TokError("invalid operand for AT instruction");
3604 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3605 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3606 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3607 return TokError(Str);
3608 }
3609 createSysAlias(AT->Encoding, Operands, S);
3610 } else if (Mnemonic == "tlbi") {
3611 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3612 if (!TLBI)
3613 return TokError("invalid operand for TLBI instruction");
3614 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3615 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3616 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3617 return TokError(Str);
3618 }
3619 createSysAlias(TLBI->Encoding, Operands, S);
3620 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
3621 const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
3622 if (!PRCTX)
3623 return TokError("invalid operand for prediction restriction instruction");
3624 else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
3625 std::string Str(
3626 Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
3627 setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
3628 return TokError(Str);
3629 }
3630 uint16_t PRCTX_Op2 =
3631 Mnemonic == "cfp" ? 4 :
3632 Mnemonic == "dvp" ? 5 :
3633 Mnemonic == "cpp" ? 7 :
3634 0;
3635 assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction")(static_cast <bool> (PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction"
) ? void (0) : __assert_fail ("PRCTX_Op2 && \"Invalid mnemonic for prediction restriction instruction\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3635
, __extension__ __PRETTY_FUNCTION__))
;
3636 createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
3637 }
3638
3639 Lex(); // Eat operand.
3640
3641 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3642 bool HasRegister = false;
3643
3644 // Check for the optional register operand.
3645 if (parseOptionalToken(AsmToken::Comma)) {
3646 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3647 return TokError("expected register operand");
3648 HasRegister = true;
3649 }
3650
3651 if (ExpectRegister && !HasRegister)
3652 return TokError("specified " + Mnemonic + " op requires a register");
3653 else if (!ExpectRegister && HasRegister)
3654 return TokError("specified " + Mnemonic + " op does not use a register");
3655
3656 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3657 return true;
3658
3659 return false;
3660}
3661
/// Parse the barrier-option operand of DSB/DMB/ISB/TSB: either a named
/// option (e.g. "sy", "csync") or an immediate in [0, 15].  For "dsb" an
/// immediate above 15 or an unknown name yields NoMatch so the nXS-variant
/// parser can retry the same tokens.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    // TSB accepts only the named option 'csync', never an immediate.
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be unlexed for the nXS retry.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical option name, if this encoding has one.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // At this point DB or TSB is non-null (the "!DB && !TSB" case returned
  // above), so the conditional below cannot dereference null.
  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
3734
3735OperandMatchResultTy
3736AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3737 const AsmToken &Tok = getTok();
3738
3739 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands")(static_cast <bool> (Mnemonic == "dsb" && "Instruction does not accept nXS operands"
) ? void (0) : __assert_fail ("Mnemonic == \"dsb\" && \"Instruction does not accept nXS operands\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3739
, __extension__ __PRETTY_FUNCTION__))
;
3740 if (Mnemonic != "dsb")
3741 return MatchOperand_ParseFail;
3742
3743 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3744 // Immediate operand.
3745 const MCExpr *ImmVal;
3746 SMLoc ExprLoc = getLoc();
3747 if (getParser().parseExpression(ImmVal))
3748 return MatchOperand_ParseFail;
3749 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3750 if (!MCE) {
3751 Error(ExprLoc, "immediate value expected for barrier operand");
3752 return MatchOperand_ParseFail;
3753 }
3754 int64_t Value = MCE->getValue();
3755 // v8.7-A DSB in the nXS variant accepts only the following immediate
3756 // values: 16, 20, 24, 28.
3757 if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3758 Error(ExprLoc, "barrier operand out of range");
3759 return MatchOperand_ParseFail;
3760 }
3761 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3762 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3763 ExprLoc, getContext(),
3764 true /*hasnXSModifier*/));
3765 return MatchOperand_Success;
3766 }
3767
3768 if (Tok.isNot(AsmToken::Identifier)) {
3769 TokError("invalid operand for instruction");
3770 return MatchOperand_ParseFail;
3771 }
3772
3773 StringRef Operand = Tok.getString();
3774 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3775
3776 if (!DB) {
3777 TokError("invalid barrier option name");
3778 return MatchOperand_ParseFail;
3779 }
3780
3781 Operands.push_back(
3782 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3783 getContext(), true /*hasnXSModifier*/));
3784 Lex(); // Consume the option
3785
3786 return MatchOperand_Success;
3787}
3788
/// Parse a system-register operand for MRS/MSR.  Resolves the identifier to
/// readable/writable encodings (or a generic S<op0>_... register) and also
/// records any matching PSTATE field encoding for MSR-immediate forms.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // SVCR names ("sm", "za", ...) are handled by a dedicated parser.
  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
    return MatchOperand_NoMatch;

  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    // -1 marks the direction (read/write) as invalid for this register.
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Not a known named register: try the generic Sx_x_Cx_Cx_x form.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Lex(); // Eat identifier

  return MatchOperand_Success;
}
3819
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true (failure) if the tokens do not form a NEON vector register;
/// on success pushes the register operand, an optional kind-suffix token
/// (e.g. ".4s"), and any trailing "[index]".
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));

  // Failure only if an index was started ('[') but malformed.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3850
3851OperandMatchResultTy
3852AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3853 SMLoc SIdx = getLoc();
3854 if (parseOptionalToken(AsmToken::LBrac)) {
3855 const MCExpr *ImmVal;
3856 if (getParser().parseExpression(ImmVal))
3857 return MatchOperand_NoMatch;
3858 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3859 if (!MCE) {
3860 TokError("immediate value expected for vector index");
3861 return MatchOperand_ParseFail;;
3862 }
3863
3864 SMLoc E = getLoc();
3865
3866 if (parseToken(AsmToken::RBrac, "']' expected"))
3867 return MatchOperand_ParseFail;;
3868
3869 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3870 E, getContext()));
3871 return MatchOperand_Success;
3872 }
3873
3874 return MatchOperand_NoMatch;
3875}
3876
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
// On success, Reg holds the matched register number and Kind the ".<kind>"
// suffix (including the dot) or the empty string when none was written.
OperandMatchResultTy
AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                         RegKind MatchKind) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Keep the leading '.' in the suffix (e.g. ".4s").
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind, MatchKind)) {
        TokError("invalid vector kind qualifier");
        return MatchOperand_ParseFail;
      }
    }
    Lex(); // Eat the register token.

    Reg = RegNum;
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
3911
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by an index (predicate-as-counter), a juxtaposed
/// operand after '[', and/or a "/z" ("/m" for plain predicates)
/// qualifier, each emitted as separate operands/tokens.
template <RegKind RK> OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      // pn<N>[<imm>] form.
      OperandMatchResultTy ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex == MatchOperand_Success)
        return MatchOperand_Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z") {
    // Predicate-as-counter registers only allow zeroing.
    Error(getLoc(), "expecting 'z' predication");
    return MatchOperand_ParseFail;
  }

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3979
3980/// parseRegister - Parse a register operand.
3981bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3982 // Try for a Neon vector register.
3983 if (!tryParseNeonVectorRegister(Operands))
3984 return false;
3985
3986 if (tryParseZTOperand(Operands) == MatchOperand_Success)
3987 return false;
3988
3989 // Otherwise try for a scalar register.
3990 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3991 return false;
3992
3993 return true;
3994}
3995
/// Parse an immediate expression with an optional leading ":specifier:"
/// ELF relocation modifier (e.g. ":lo12:symbol").  On success ImmVal holds
/// the parsed expression; when a modifier was present it is wrapped in an
/// AArch64MCExpr carrying the variant kind.  Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Match the specifier case-insensitively against the known names.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation kind survives into emission.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
4073
/// Parse an SME matrix tile list operand, e.g. "{za0.d, za1.d}", the alias
/// "{za}" (all tiles), or the empty list "{}".  The tiles are collapsed to
/// a bitmask over the ZAD0..ZAD7 double-word tiles via ComputeRegsForAlias.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Parse one "za<N>.<width>" tile; fills Reg and ElementWidth.
  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const Optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    // 0xFF selects all eight ZAD tiles.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    // Put the '{' back so another list-operand parser can try.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD tiles covered by every listed tile.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Out-of-order and duplicate tiles are accepted but warned about.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // One mask bit per ZAD tile, relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
4182
/// Parse a vector-register list "{v0.4s, v1.4s}" or range "{z0.s-z3.s}" for
/// the register kind given by the template parameter.  Registers must be
/// sequential (modulo the number of registers in the class) and the element
/// kind suffix must match across the list; at most 4 registers are allowed.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" lists are handled elsewhere; don't report an error for them.
    if (RegTok.is(AsmToken::Identifier) && ParseRes == MatchOperand_NoMatch &&
        RegTok.getString().equals_insensitive("zt0"))
      return MatchOperand_NoMatch;

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  int NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{vA.k-vB.k}".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Distance with wraparound at the top of the register class.
    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated list form: "{vA.k, vB.k, ...}".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) %
              NumRegs) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
4311
4312/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4313bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4314 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4315 if (ParseRes != MatchOperand_Success)
4316 return true;
4317
4318 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4319}
4320
/// Parse a GPR64sp operand optionally followed by ", #0" (e.g. "[sp, #0]"
/// forms).  Any index other than an absent one or a literal 0 is an error.
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No trailing ", #0": just the register.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // The '#' before the immediate is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  // Only the constant 0 is accepted as the index.
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
4354
/// Parse a ZT (SME2 lookup-table) register operand, optionally followed by
/// an immediate index in square brackets, which is pushed as a separate
/// immediate operand.
OperandMatchResultTy
AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (RegNum == 0)
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat identifier token.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      // The index must fold to a constant.
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
  }

  return MatchOperand_Success;
}
4390
/// Parse a scalar GPR operand.  When ParseShiftExtend is set, an optional
/// ", <shift|extend> #amount" suffix is folded into the register operand;
/// EqTy selects how register equality is checked during matching.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Merge the parsed shift/extend into the register operand itself.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
4425
4426bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4427 MCAsmParser &Parser = getParser();
4428
4429 // Some SVE instructions have a decoration after the immediate, i.e.
4430 // "mul vl". We parse them here and add tokens, which must be present in the
4431 // asm string in the tablegen instruction.
4432 bool NextIsVL =
4433 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4434 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4435 if (!getTok().getString().equals_insensitive("mul") ||
4436 !(NextIsVL || NextIsHash))
4437 return true;
4438
4439 Operands.push_back(
4440 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4441 Lex(); // Eat the "mul"
4442
4443 if (NextIsVL) {
4444 Operands.push_back(
4445 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4446 Lex(); // Eat the "vl"
4447 return false;
4448 }
4449
4450 if (NextIsHash) {
4451 Lex(); // Eat the #
4452 SMLoc S = getLoc();
4453
4454 // Parse immediate operand.
4455 const MCExpr *ImmVal;
4456 if (!Parser.parseExpression(ImmVal))
4457 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4458 Operands.push_back(AArch64Operand::CreateImm(
4459 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4460 getContext()));
4461 return MatchOperand_Success;
4462 }
4463 }
4464
4465 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4466}
4467
4468bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4469 StringRef &VecGroup) {
4470 MCAsmParser &Parser = getParser();
4471 auto Tok = Parser.getTok();
4472 if (Tok.isNot(AsmToken::Identifier))
4473 return true;
4474
4475 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4476 .Case("vgx2", "vgx2")
4477 .Case("vgx4", "vgx4")
4478 .Default("");
4479
4480 if (VG.empty())
4481 return true;
4482
4483 VecGroup = VG;
4484 Parser.Lex(); // Eat vgx[2|4]
4485 return false;
4486}
4487
4488bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4489 auto Tok = getTok();
4490 if (Tok.isNot(AsmToken::Identifier))
4491 return true;
4492
4493 auto Keyword = Tok.getString();
4494 Keyword = StringSwitch<StringRef>(Keyword.lower())
4495 .Case("sm", "sm")
4496 .Case("za", "za")
4497 .Default(Keyword);
4498 Operands.push_back(
4499 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4500
4501 Lex();
4502 return false;
4503}
4504
4505/// parseOperand - Parse a arm instruction operand. For now this parses the
4506/// operand regardless of the mnemonic.
4507bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4508 bool invertCondCode) {
4509 MCAsmParser &Parser = getParser();
4510
4511 OperandMatchResultTy ResTy =
4512 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
4513
4514 // Check if the current operand has a custom associated parser, if so, try to
4515 // custom parse the operand, or fallback to the general approach.
4516 if (ResTy == MatchOperand_Success)
4517 return false;
4518 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4519 // there was a match, but an error occurred, in which case, just return that
4520 // the operand parsing failed.
4521 if (ResTy == MatchOperand_ParseFail)
4522 return true;
4523
4524 // Nothing custom, so do general case parsing.
4525 SMLoc S, E;
4526 switch (getLexer().getKind()) {
4527 default: {
4528 SMLoc S = getLoc();
4529 const MCExpr *Expr;
4530 if (parseSymbolicImmVal(Expr))
4531 return Error(S, "invalid operand");
4532
4533 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4534 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4535 return false;
4536 }
4537 case AsmToken::LBrac: {
4538 Operands.push_back(
4539 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4540 Lex(); // Eat '['
4541
4542 // There's no comma after a '[', so we can parse the next operand
4543 // immediately.
4544 return parseOperand(Operands, false, false);
4545 }
4546 case AsmToken::LCurly: {
4547 if (!parseNeonVectorList(Operands))
4548 return false;
4549
4550 Operands.push_back(
4551 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4552 Lex(); // Eat '{'
4553
4554 // There's no comma after a '{', so we can parse the next operand
4555 // immediately.
4556 return parseOperand(Operands, false, false);
4557 }
4558 case AsmToken::Identifier: {
4559 // See if this is a "VG" decoration used by SME instructions.
4560 StringRef VecGroup;
4561 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4562 Operands.push_back(
4563 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4564 return false;
4565 }
4566 // If we're expecting a Condition Code operand, then just parse that.
4567 if (isCondCode)
4568 return parseCondCode(Operands, invertCondCode);
4569
4570 // If it's a register name, parse it.
4571 if (!parseRegister(Operands))
4572 return false;
4573
4574 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4575 // by SVE instructions.
4576 if (!parseOptionalMulOperand(Operands))
4577 return false;
4578
4579 // If this is an "smstart" or "smstop" instruction, parse its special
4580 // keyword operand as an identifier.
4581 if (Mnemonic == "smstart" || Mnemonic == "smstop")
4582 return parseKeywordOperand(Operands);
4583
4584 // This could be an optional "shift" or "extend" operand.
4585 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
4586 // We can only continue if no tokens were eaten.
4587 if (GotShift != MatchOperand_NoMatch)
4588 return GotShift;
4589
4590 // If this is a two-word mnemonic, parse its special keyword
4591 // operand as an identifier.
4592 if (Mnemonic == "brb")
4593 return parseKeywordOperand(Operands);
4594
4595 // This was not a register so parse other operands that start with an
4596 // identifier (like labels) as expressions and create them as immediates.
4597 const MCExpr *IdVal;
4598 S = getLoc();
4599 if (getParser().parseExpression(IdVal))
4600 return true;
4601 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4602 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4603 return false;
4604 }
4605 case AsmToken::Integer:
4606 case AsmToken::Real:
4607 case AsmToken::Hash: {
4608 // #42 -> immediate.
4609 S = getLoc();
4610
4611 parseOptionalToken(AsmToken::Hash);
4612
4613 // Parse a negative sign
4614 bool isNegative = false;
4615 if (getTok().is(AsmToken::Minus)) {
4616 isNegative = true;
4617 // We need to consume this token only when we have a Real, otherwise
4618 // we let parseSymbolicImmVal take care of it
4619 if (Parser.getLexer().peekTok().is(AsmToken::Real))
4620 Lex();
4621 }
4622
4623 // The only Real that should come through here is a literal #0.0 for
4624 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4625 // so convert the value.
4626 const AsmToken &Tok = getTok();
4627 if (Tok.is(AsmToken::Real)) {
4628 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4629 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4630 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4631 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4632 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4633 return TokError("unexpected floating point literal");
4634 else if (IntVal != 0 || isNegative)
4635 return TokError("expected floating-point constant #0.0");
4636 Lex(); // Eat the token.
4637
4638 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4639 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4640 return false;
4641 }
4642
4643 const MCExpr *ImmVal;
4644 if (parseSymbolicImmVal(ImmVal))
4645 return true;
4646
4647 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4648 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4649 return false;
4650 }
4651 case AsmToken::Equal: {
4652 SMLoc Loc = getLoc();
4653 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4654 return TokError("unexpected token in operand");
4655 Lex(); // Eat '='
4656 const MCExpr *SubExprVal;
4657 if (getParser().parseExpression(SubExprVal))
4658 return true;
4659
4660 if (Operands.size() < 2 ||
4661 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4662 return Error(Loc, "Only valid when first operand is register");
4663
4664 bool IsXReg =
4665 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4666 Operands[1]->getReg());
4667
4668 MCContext& Ctx = getContext();
4669 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4670 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
4671 if (isa<MCConstantExpr>(SubExprVal)) {
4672 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4673 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4674 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
4675 ShiftAmt += 16;
4676 Imm >>= 16;
4677 }
4678 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4679 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4680 Operands.push_back(AArch64Operand::CreateImm(
4681 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4682 if (ShiftAmt)
4683 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4684 ShiftAmt, true, S, E, Ctx));
4685 return false;
4686 }
4687 APInt Simm = APInt(64, Imm << ShiftAmt);
4688 // check if the immediate is an unsigned or signed 32-bit int for W regs
4689 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4690 return Error(Loc, "Immediate too large for register");
4691 }
4692 // If it is a label or an imm that cannot fit in a movz, put it into CP.
4693 const MCExpr *CPLoc =
4694 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4695 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4696 return false;
4697 }
4698 }
4699}
4700
4701bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4702 const MCExpr *Expr = nullptr;
4703 SMLoc L = getLoc();
4704 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4705 return true;
4706 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4707 if (check(!Value, L, "expected constant expression"))
4708 return true;
4709 Out = Value->getValue();
4710 return false;
4711}
4712
4713bool AArch64AsmParser::parseComma() {
4714 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4715 return true;
4716 // Eat the comma
4717 Lex();
4718 return false;
4719}
4720
4721bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4722 unsigned First, unsigned Last) {
4723 unsigned Reg;
4724 SMLoc Start, End;
4725 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
4726 return true;
4727
4728 // Special handling for FP and LR; they aren't linearly after x28 in
4729 // the registers enum.
4730 unsigned RangeEnd = Last;
4731 if (Base == AArch64::X0) {
4732 if (Last == AArch64::FP) {
4733 RangeEnd = AArch64::X28;
4734 if (Reg == AArch64::FP) {
4735 Out = 29;
4736 return false;
4737 }
4738 }
4739 if (Last == AArch64::LR) {
4740 RangeEnd = AArch64::X28;
4741 if (Reg == AArch64::FP) {
4742 Out = 29;
4743 return false;
4744 } else if (Reg == AArch64::LR) {
4745 Out = 30;
4746 return false;
4747 }
4748 }
4749 }
4750
4751 if (check(Reg < First || Reg > RangeEnd, Start,
4752 Twine("expected register in range ") +
4753 AArch64InstPrinter::getRegisterName(First) + " to " +
4754 AArch64InstPrinter::getRegisterName(Last)))
4755 return true;
4756 Out = Reg - Base;
4757 return false;
4758}
4759
4760bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
4761 const MCParsedAsmOperand &Op2) const {
4762 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4763 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4764
4765 if (AOp1.isVectorList() && AOp2.isVectorList())
4766 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
4767 AOp1.getVectorListStart() == AOp2.getVectorListStart();
4768
4769 if (!AOp1.isReg() || !AOp2.isReg())
4770 return false;
4771
4772 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4773 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4774 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
4775
4776 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 4777
, __extension__ __PRETTY_FUNCTION__))
4777 "Testing equality of non-scalar registers not supported")(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 4777
, __extension__ __PRETTY_FUNCTION__))
;
4778
4779 // Check if a registers match their sub/super register classes.
4780 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4781 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4782 if (AOp1.getRegEqualityTy() == EqualsSubReg)
4783 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4784 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4785 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4786 if (AOp2.getRegEqualityTy() == EqualsSubReg)
4787 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4788
4789 return false;
4790}
4791
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. On entry the mnemonic has already been lexed into \p Name; this
/// routine splits it on '.' into mnemonic tokens, then parses the
/// comma-separated operand list into \p Operands. Returns true on error
/// (a diagnostic has been emitted), false on success.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the legacy condition-suffixed branch spellings (e.g. "beq")
  // into the architectural "b.<cc>" form before splitting the mnemonic.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  // The first mnemonic token is the one instruction matching keys off.
  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    // The condition code is represented as a '.' suffix token followed by a
    // dedicated condition-code operand.
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. The flags tell parseOperand whether
      // operand N must be treated as a condition code, and whether that
      // condition code should be inverted (for the cset/cinc alias families).
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //  '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4936
4937static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4938 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(static_cast <bool> ((ZReg >= AArch64::Z0) &&
(ZReg <= AArch64::Z31)) ? void (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 4938
, __extension__ __PRETTY_FUNCTION__))
;
4939 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4940 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4941 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4942 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4943 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4944 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4945}
4946
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Performs target-specific semantic checks on an already-matched MCInst:
// movprfx sequencing rules, unpredictable register combinations for
// load/store (pair/exclusive/writeback) and v8.8-A memops instructions, and
// symbolic-immediate restrictions on ADD/SUB. Returns true (after emitting a
// diagnostic) if the instruction is rejected. \p Loc holds per-operand
// source locations gathered during matching.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erronously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed intructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                          " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                           " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    // Tied operands are exempt: they are by definition the same register as
    // the destination.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                             " movprfx and destination also used as non-destructive"
                             " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instructions general predicate.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback pair loads: operand 0 is the writeback base def, so the
    // data registers and base are at operands 1..3.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    [[fallthrough]];
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive stores: the status register Rs must not overlap the data or
    // base registers (base overlap is permitted when the base is SP).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
                   "unpredictable LDRA instruction, writeback base"
                   " is also a destination");
    break;
  }
  }

  // Check v8.8-A memops instructions.
  switch (Inst.getOpcode()) {
  case AArch64::CPYFP:
  case AArch64::CPYFPWN:
  case AArch64::CPYFPRN:
  case AArch64::CPYFPN:
  case AArch64::CPYFPWT:
  case AArch64::CPYFPWTWN:
  case AArch64::CPYFPWTRN:
  case AArch64::CPYFPWTN:
  case AArch64::CPYFPRT:
  case AArch64::CPYFPRTWN:
  case AArch64::CPYFPRTRN:
  case AArch64::CPYFPRTN:
  case AArch64::CPYFPT:
  case AArch64::CPYFPTWN:
  case AArch64::CPYFPTRN:
  case AArch64::CPYFPTN:
  case AArch64::CPYFM:
  case AArch64::CPYFMWN:
  case AArch64::CPYFMRN:
  case AArch64::CPYFMN:
  case AArch64::CPYFMWT:
  case AArch64::CPYFMWTWN:
  case AArch64::CPYFMWTRN:
  case AArch64::CPYFMWTN:
  case AArch64::CPYFMRT:
  case AArch64::CPYFMRTWN:
  case AArch64::CPYFMRTRN:
  case AArch64::CPYFMRTN:
  case AArch64::CPYFMT:
  case AArch64::CPYFMTWN:
  case AArch64::CPYFMTRN:
  case AArch64::CPYFMTN:
  case AArch64::CPYFE:
  case AArch64::CPYFEWN:
  case AArch64::CPYFERN:
  case AArch64::CPYFEN:
  case AArch64::CPYFEWT:
  case AArch64::CPYFEWTWN:
  case AArch64::CPYFEWTRN:
  case AArch64::CPYFEWTN:
  case AArch64::CPYFERT:
  case AArch64::CPYFERTWN:
  case AArch64::CPYFERTRN:
  case AArch64::CPYFERTN:
  case AArch64::CPYFET:
  case AArch64::CPYFETWN:
  case AArch64::CPYFETRN:
  case AArch64::CPYFETN:
  case AArch64::CPYP:
  case AArch64::CPYPWN:
  case AArch64::CPYPRN:
  case AArch64::CPYPN:
  case AArch64::CPYPWT:
  case AArch64::CPYPWTWN:
  case AArch64::CPYPWTRN:
  case AArch64::CPYPWTN:
  case AArch64::CPYPRT:
  case AArch64::CPYPRTWN:
  case AArch64::CPYPRTRN:
  case AArch64::CPYPRTN:
  case AArch64::CPYPT:
  case AArch64::CPYPTWN:
  case AArch64::CPYPTRN:
  case AArch64::CPYPTN:
  case AArch64::CPYM:
  case AArch64::CPYMWN:
  case AArch64::CPYMRN:
  case AArch64::CPYMN:
  case AArch64::CPYMWT:
  case AArch64::CPYMWTWN:
  case AArch64::CPYMWTRN:
  case AArch64::CPYMWTN:
  case AArch64::CPYMRT:
  case AArch64::CPYMRTWN:
  case AArch64::CPYMRTRN:
  case AArch64::CPYMRTN:
  case AArch64::CPYMT:
  case AArch64::CPYMTWN:
  case AArch64::CPYMTRN:
  case AArch64::CPYMTN:
  case AArch64::CPYE:
  case AArch64::CPYEWN:
  case AArch64::CPYERN:
  case AArch64::CPYEN:
  case AArch64::CPYEWT:
  case AArch64::CPYEWTWN:
  case AArch64::CPYEWTRN:
  case AArch64::CPYEWTN:
  case AArch64::CPYERT:
  case AArch64::CPYERTWN:
  case AArch64::CPYERTRN:
  case AArch64::CPYERTN:
  case AArch64::CPYET:
  case AArch64::CPYETWN:
  case AArch64::CPYETRN:
  case AArch64::CPYETN: {
    // CPY*: operands 0-2 are the writeback (tied) copies of the
    // destination/source/size registers at operands 3-5; each pair must name
    // the same register and the three registers must be pairwise distinct.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xs_wb = Inst.getOperand(1).getReg();
    unsigned Xn_wb = Inst.getOperand(2).getReg();
    unsigned Xd = Inst.getOperand(3).getReg();
    unsigned Xs = Inst.getOperand(4).getReg();
    unsigned Xn = Inst.getOperand(5).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid CPY instruction, Xd_wb and Xd do not match");
    if (Xs_wb != Xs)
      return Error(Loc[0],
                   "invalid CPY instruction, Xs_wb and Xs do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid CPY instruction, Xn_wb and Xn do not match");
    if (Xd == Xs)
      return Error(Loc[0], "invalid CPY instruction, destination and source"
                           " registers are the same");
    if (Xd == Xn)
      return Error(Loc[0], "invalid CPY instruction, destination and size"
                           " registers are the same");
    if (Xs == Xn)
      return Error(Loc[0], "invalid CPY instruction, source and size"
                           " registers are the same");
    break;
  }
  case AArch64::SETP:
  case AArch64::SETPT:
  case AArch64::SETPN:
  case AArch64::SETPTN:
  case AArch64::SETM:
  case AArch64::SETMT:
  case AArch64::SETMN:
  case AArch64::SETMTN:
  case AArch64::SETE:
  case AArch64::SETET:
  case AArch64::SETEN:
  case AArch64::SETETN:
  case AArch64::SETGP:
  case AArch64::SETGPT:
  case AArch64::SETGPN:
  case AArch64::SETGPTN:
  case AArch64::SETGM:
  case AArch64::SETGMT:
  case AArch64::SETGMN:
  case AArch64::SETGMTN:
  case AArch64::MOPSSETGE:
  case AArch64::MOPSSETGET:
  case AArch64::MOPSSETGEN:
  case AArch64::MOPSSETGETN: {
    // SET*: operands 0-1 are the writeback (tied) copies of the
    // destination/size registers at operands 2-3; operand 4 is the source.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xn_wb = Inst.getOperand(1).getReg();
    unsigned Xd = Inst.getOperand(2).getReg();
    unsigned Xn = Inst.getOperand(3).getReg();
    unsigned Xm = Inst.getOperand(4).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid SET instruction, Xd_wb and Xd do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid SET instruction, Xn_wb and Xn do not match");
    if (Xd == Xn)
      return Error(Loc[0], "invalid SET instruction, destination and size"
                           " registers are the same");
    if (Xd == Xm)
      return Error(Loc[0], "invalid SET instruction, destination and source"
                           " registers are the same");
    if (Xn == Xm)
      return Error(Loc[0], "invalid SET instruction, source and size"
                           " registers are the same");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
5405
5406static std::string AArch64MnemonicSpellCheck(StringRef S,
5407 const FeatureBitset &FBS,
5408 unsigned VariantID = 0);
5409
5410bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5411 uint64_t ErrorInfo,
5412 OperandVector &Operands) {
5413 switch (ErrCode) {
5414 case Match_InvalidTiedOperand: {
5415 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5416 if (Op.isVectorList())
5417 return Error(Loc, "operand must match destination register list");
5418
5419 assert(Op.isReg() && "Unexpected operand type")(static_cast <bool> (Op.isReg() && "Unexpected operand type"
) ? void (0) : __assert_fail ("Op.isReg() && \"Unexpected operand type\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5419
, __extension__ __PRETTY_FUNCTION__))
;
5420 switch (Op.getRegEqualityTy()) {
5421 case RegConstraintEqualityTy::EqualsSubReg:
5422 return Error(Loc, "operand must be 64-bit form of destination register");
5423 case RegConstraintEqualityTy::EqualsSuperReg:
5424 return Error(Loc, "operand must be 32-bit form of destination register");
5425 case RegConstraintEqualityTy::EqualsReg:
5426 return Error(Loc, "operand must match destination register");
5427 }
5428 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5428
)
;
5429 }
5430 case Match_MissingFeature:
5431 return Error(Loc,
5432 "instruction requires a CPU feature not currently enabled");
5433 case Match_InvalidOperand:
5434 return Error(Loc, "invalid operand for instruction");
5435 case Match_InvalidSuffix:
5436 return Error(Loc, "invalid type suffix for instruction");
5437 case Match_InvalidCondCode:
5438 return Error(Loc, "expected AArch64 condition code");
5439 case Match_AddSubRegExtendSmall:
5440 return Error(Loc,
5441 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5442 case Match_AddSubRegExtendLarge:
5443 return Error(Loc,
5444 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5445 case Match_AddSubSecondSource:
5446 return Error(Loc,
5447 "expected compatible register, symbol or integer in range [0, 4095]");
5448 case Match_LogicalSecondSource:
5449 return Error(Loc, "expected compatible register or logical immediate");
5450 case Match_InvalidMovImm32Shift:
5451 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5452 case Match_InvalidMovImm64Shift:
5453 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5454 case Match_AddSubRegShift32:
5455 return Error(Loc,
5456 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5457 case Match_AddSubRegShift64:
5458 return Error(Loc,
5459 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5460 case Match_InvalidFPImm:
5461 return Error(Loc,
5462 "expected compatible register or floating-point constant");
5463 case Match_InvalidMemoryIndexedSImm6:
5464 return Error(Loc, "index must be an integer in range [-32, 31].");
5465 case Match_InvalidMemoryIndexedSImm5:
5466 return Error(Loc, "index must be an integer in range [-16, 15].");
5467 case Match_InvalidMemoryIndexed1SImm4:
5468 return Error(Loc, "index must be an integer in range [-8, 7].");
5469 case Match_InvalidMemoryIndexed2SImm4:
5470 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5471 case Match_InvalidMemoryIndexed3SImm4:
5472 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5473 case Match_InvalidMemoryIndexed4SImm4:
5474 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5475 case Match_InvalidMemoryIndexed16SImm4:
5476 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5477 case Match_InvalidMemoryIndexed32SImm4:
5478 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5479 case Match_InvalidMemoryIndexed1SImm6:
5480 return Error(Loc, "index must be an integer in range [-32, 31].");
5481 case Match_InvalidMemoryIndexedSImm8:
5482 return Error(Loc, "index must be an integer in range [-128, 127].");
5483 case Match_InvalidMemoryIndexedSImm9:
5484 return Error(Loc, "index must be an integer in range [-256, 255].");
5485 case Match_InvalidMemoryIndexed16SImm9:
5486 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5487 case Match_InvalidMemoryIndexed8SImm10:
5488 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5489 case Match_InvalidMemoryIndexed4SImm7:
5490 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5491 case Match_InvalidMemoryIndexed8SImm7:
5492 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5493 case Match_InvalidMemoryIndexed16SImm7:
5494 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5495 case Match_InvalidMemoryIndexed8UImm5:
5496 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5497 case Match_InvalidMemoryIndexed8UImm3:
5498 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5499 case Match_InvalidMemoryIndexed4UImm5:
5500 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5501 case Match_InvalidMemoryIndexed2UImm5:
5502 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5503 case Match_InvalidMemoryIndexed8UImm6:
5504 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5505 case Match_InvalidMemoryIndexed16UImm6:
5506 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5507 case Match_InvalidMemoryIndexed4UImm6:
5508 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5509 case Match_InvalidMemoryIndexed2UImm6:
5510 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5511 case Match_InvalidMemoryIndexed1UImm6:
5512 return Error(Loc, "index must be in range [0, 63].");
5513 case Match_InvalidMemoryWExtend8:
5514 return Error(Loc,
5515 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5516 case Match_InvalidMemoryWExtend16:
5517 return Error(Loc,
5518 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5519 case Match_InvalidMemoryWExtend32:
5520 return Error(Loc,
5521 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5522 case Match_InvalidMemoryWExtend64:
5523 return Error(Loc,
5524 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5525 case Match_InvalidMemoryWExtend128:
5526 return Error(Loc,
5527 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5528 case Match_InvalidMemoryXExtend8:
5529 return Error(Loc,
5530 "expected 'lsl' or 'sxtx' with optional shift of #0");
5531 case Match_InvalidMemoryXExtend16:
5532 return Error(Loc,
5533 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5534 case Match_InvalidMemoryXExtend32:
5535 return Error(Loc,
5536 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5537 case Match_InvalidMemoryXExtend64:
5538 return Error(Loc,
5539 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5540 case Match_InvalidMemoryXExtend128:
5541 return Error(Loc,
5542 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5543 case Match_InvalidMemoryIndexed1:
5544 return Error(Loc, "index must be an integer in range [0, 4095].");
5545 case Match_InvalidMemoryIndexed2:
5546 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5547 case Match_InvalidMemoryIndexed4:
5548 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5549 case Match_InvalidMemoryIndexed8:
5550 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5551 case Match_InvalidMemoryIndexed16:
5552 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5553 case Match_InvalidImm0_0:
5554 return Error(Loc, "immediate must be 0.");
5555 case Match_InvalidImm0_1:
5556 return Error(Loc, "immediate must be an integer in range [0, 1].");
5557 case Match_InvalidImm0_3:
5558 return Error(Loc, "immediate must be an integer in range [0, 3].");
5559 case Match_InvalidImm0_7:
5560 return Error(Loc, "immediate must be an integer in range [0, 7].");
5561 case Match_InvalidImm0_15:
5562 return Error(Loc, "immediate must be an integer in range [0, 15].");
5563 case Match_InvalidImm0_31:
5564 return Error(Loc, "immediate must be an integer in range [0, 31].");
5565 case Match_InvalidImm0_63:
5566 return Error(Loc, "immediate must be an integer in range [0, 63].");
5567 case Match_InvalidImm0_127:
5568 return Error(Loc, "immediate must be an integer in range [0, 127].");
5569 case Match_InvalidImm0_255:
5570 return Error(Loc, "immediate must be an integer in range [0, 255].");
5571 case Match_InvalidImm0_65535:
5572 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5573 case Match_InvalidImm1_8:
5574 return Error(Loc, "immediate must be an integer in range [1, 8].");
5575 case Match_InvalidImm1_16:
5576 return Error(Loc, "immediate must be an integer in range [1, 16].");
5577 case Match_InvalidImm1_32:
5578 return Error(Loc, "immediate must be an integer in range [1, 32].");
5579 case Match_InvalidImm1_64:
5580 return Error(Loc, "immediate must be an integer in range [1, 64].");
5581 case Match_InvalidMemoryIndexedRange2UImm0:
5582 return Error(Loc, "vector select offset must be the immediate range 0:1.");
5583 case Match_InvalidMemoryIndexedRange2UImm1:
5584 return Error(Loc, "vector select offset must be an immediate range of the "
5585 "form <immf>:<imml>, where the first "
5586 "immediate is a multiple of 2 in the range [0, 2], and "
5587 "the second immediate is immf + 1.");
5588 case Match_InvalidMemoryIndexedRange2UImm2:
5589 case Match_InvalidMemoryIndexedRange2UImm3:
5590 return Error(
5591 Loc,
5592 "vector select offset must be an immediate range of the form "
5593 "<immf>:<imml>, "
5594 "where the first immediate is a multiple of 2 in the range [0, 6] or "
5595 "[0, 14] "
5596 "depending on the instruction, and the second immediate is immf + 1.");
5597 case Match_InvalidMemoryIndexedRange4UImm0:
5598 return Error(Loc, "vector select offset must be the immediate range 0:3.");
5599 case Match_InvalidMemoryIndexedRange4UImm1:
5600 case Match_InvalidMemoryIndexedRange4UImm2:
5601 return Error(
5602 Loc,
5603 "vector select offset must be an immediate range of the form "
5604 "<immf>:<imml>, "
5605 "where the first immediate is a multiple of 4 in the range [0, 4] or "
5606 "[0, 12] "
5607 "depending on the instruction, and the second immediate is immf + 3.");
5608 case Match_InvalidSVEAddSubImm8:
5609 return Error(Loc, "immediate must be an integer in range [0, 255]"
5610 " with a shift amount of 0");
5611 case Match_InvalidSVEAddSubImm16:
5612 case Match_InvalidSVEAddSubImm32:
5613 case Match_InvalidSVEAddSubImm64:
5614 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5615 "multiple of 256 in range [256, 65280]");
5616 case Match_InvalidSVECpyImm8:
5617 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5618 " with a shift amount of 0");
5619 case Match_InvalidSVECpyImm16:
5620 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5621 "multiple of 256 in range [-32768, 65280]");
5622 case Match_InvalidSVECpyImm32:
5623 case Match_InvalidSVECpyImm64:
5624 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5625 "multiple of 256 in range [-32768, 32512]");
5626 case Match_InvalidIndexRange0_0:
5627 return Error(Loc, "expected lane specifier '[0]'");
5628 case Match_InvalidIndexRange1_1:
5629 return Error(Loc, "expected lane specifier '[1]'");
5630 case Match_InvalidIndexRange0_15:
5631 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5632 case Match_InvalidIndexRange0_7:
5633 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5634 case Match_InvalidIndexRange0_3:
5635 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5636 case Match_InvalidIndexRange0_1:
5637 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5638 case Match_InvalidSVEIndexRange0_63:
5639 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5640 case Match_InvalidSVEIndexRange0_31:
5641 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5642 case Match_InvalidSVEIndexRange0_15:
5643 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5644 case Match_InvalidSVEIndexRange0_7:
5645 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5646 case Match_InvalidSVEIndexRange0_3:
5647 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5648 case Match_InvalidLabel:
5649 return Error(Loc, "expected label or encodable integer pc offset");
5650 case Match_MRS:
5651 return Error(Loc, "expected readable system register");
5652 case Match_MSR:
5653 case Match_InvalidSVCR:
5654 return Error(Loc, "expected writable system register or pstate");
5655 case Match_InvalidComplexRotationEven:
5656 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5657 case Match_InvalidComplexRotationOdd:
5658 return Error(Loc, "complex rotation must be 90 or 270.");
5659 case Match_MnemonicFail: {
5660 std::string Suggestion = AArch64MnemonicSpellCheck(
5661 ((AArch64Operand &)*Operands[0]).getToken(),
5662 ComputeAvailableFeatures(STI->getFeatureBits()));
5663 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5664 }
5665 case Match_InvalidGPR64shifted8:
5666 return Error(Loc, "register must be x0..x30 or xzr, without shift");
5667 case Match_InvalidGPR64shifted16:
5668 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5669 case Match_InvalidGPR64shifted32:
5670 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5671 case Match_InvalidGPR64shifted64:
5672 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5673 case Match_InvalidGPR64shifted128:
5674 return Error(
5675 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5676 case Match_InvalidGPR64NoXZRshifted8:
5677 return Error(Loc, "register must be x0..x30 without shift");
5678 case Match_InvalidGPR64NoXZRshifted16:
5679 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5680 case Match_InvalidGPR64NoXZRshifted32:
5681 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5682 case Match_InvalidGPR64NoXZRshifted64:
5683 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5684 case Match_InvalidGPR64NoXZRshifted128:
5685 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
5686 case Match_InvalidZPR32UXTW8:
5687 case Match_InvalidZPR32SXTW8:
5688 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5689 case Match_InvalidZPR32UXTW16:
5690 case Match_InvalidZPR32SXTW16:
5691 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5692 case Match_InvalidZPR32UXTW32:
5693 case Match_InvalidZPR32SXTW32:
5694 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5695 case Match_InvalidZPR32UXTW64:
5696 case Match_InvalidZPR32SXTW64:
5697 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5698 case Match_InvalidZPR64UXTW8:
5699 case Match_InvalidZPR64SXTW8:
5700 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5701 case Match_InvalidZPR64UXTW16:
5702 case Match_InvalidZPR64SXTW16:
5703 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5704 case Match_InvalidZPR64UXTW32:
5705 case Match_InvalidZPR64SXTW32:
5706 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5707 case Match_InvalidZPR64UXTW64:
5708 case Match_InvalidZPR64SXTW64:
5709 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5710 case Match_InvalidZPR32LSL8:
5711 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5712 case Match_InvalidZPR32LSL16:
5713 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5714 case Match_InvalidZPR32LSL32:
5715 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5716 case Match_InvalidZPR32LSL64:
5717 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5718 case Match_InvalidZPR64LSL8:
5719 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5720 case Match_InvalidZPR64LSL16:
5721 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5722 case Match_InvalidZPR64LSL32:
5723 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5724 case Match_InvalidZPR64LSL64:
5725 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5726 case Match_InvalidZPR0:
5727 return Error(Loc, "expected register without element width suffix");
5728 case Match_InvalidZPR8:
5729 case Match_InvalidZPR16:
5730 case Match_InvalidZPR32:
5731 case Match_InvalidZPR64:
5732 case Match_InvalidZPR128:
5733 return Error(Loc, "invalid element width");
5734 case Match_InvalidZPR_3b8:
5735 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
5736 case Match_InvalidZPR_3b16:
5737 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
5738 case Match_InvalidZPR_3b32:
5739 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
5740 case Match_InvalidZPR_4b8:
5741 return Error(Loc,
5742 "Invalid restricted vector register, expected z0.b..z15.b");
5743 case Match_InvalidZPR_4b16:
5744 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
5745 case Match_InvalidZPR_4b32:
5746 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
5747 case Match_InvalidZPR_4b64:
5748 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
5749 case Match_InvalidSVEPattern:
5750 return Error(Loc, "invalid predicate pattern");
5751 case Match_InvalidSVEPredicateAnyReg:
5752 case Match_InvalidSVEPredicateBReg:
5753 case Match_InvalidSVEPredicateHReg:
5754 case Match_InvalidSVEPredicateSReg:
5755 case Match_InvalidSVEPredicateDReg:
5756 return Error(Loc, "invalid predicate register.");
5757 case Match_InvalidSVEPredicate3bAnyReg:
5758 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
5759 case Match_InvalidSVEPNPredicateB_p8to15Reg:
5760 case Match_InvalidSVEPNPredicateH_p8to15Reg:
5761 case Match_InvalidSVEPNPredicateS_p8to15Reg:
5762 case Match_InvalidSVEPNPredicateD_p8to15Reg:
5763 return Error(Loc, "Invalid predicate register, expected PN in range "
5764 "pn8..pn15 with element suffix.");
5765 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
5766 return Error(Loc, "invalid restricted predicate-as-counter register "
5767 "expected pn8..pn15");
5768 case Match_InvalidSVEPNPredicateBReg:
5769 case Match_InvalidSVEPNPredicateHReg:
5770 case Match_InvalidSVEPNPredicateSReg:
5771 case Match_InvalidSVEPNPredicateDReg:
5772 return Error(Loc, "Invalid predicate register, expected PN in range "
5773 "pn0..pn15 with element suffix.");
5774 case Match_InvalidSVEVecLenSpecifier:
5775 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
5776 case Match_InvalidSVEPredicateListMul2x8:
5777 case Match_InvalidSVEPredicateListMul2x16:
5778 case Match_InvalidSVEPredicateListMul2x32:
5779 case Match_InvalidSVEPredicateListMul2x64:
5780 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
5781 "predicate registers, where the first vector is a multiple of 2 "
5782 "and with correct element type");
5783 case Match_InvalidSVEExactFPImmOperandHalfOne:
5784 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
5785 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5786 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
5787 case Match_InvalidSVEExactFPImmOperandZeroOne:
5788 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
5789 case Match_InvalidMatrixTileVectorH8:
5790 case Match_InvalidMatrixTileVectorV8:
5791 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
5792 case Match_InvalidMatrixTileVectorH16:
5793 case Match_InvalidMatrixTileVectorV16:
5794 return Error(Loc,
5795 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
5796 case Match_InvalidMatrixTileVectorH32:
5797 case Match_InvalidMatrixTileVectorV32:
5798 return Error(Loc,
5799 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
5800 case Match_InvalidMatrixTileVectorH64:
5801 case Match_InvalidMatrixTileVectorV64:
5802 return Error(Loc,
5803 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
5804 case Match_InvalidMatrixTileVectorH128:
5805 case Match_InvalidMatrixTileVectorV128:
5806 return Error(Loc,
5807 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
5808 case Match_InvalidMatrixTile32:
5809 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
5810 case Match_InvalidMatrixTile64:
5811 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
5812