Bug Summary

File: build/source/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 7930, column 22
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16 -I lib/Target/AArch64/AsmParser -I /build/source/llvm/lib/Target/AArch64/AsmParser -I /build/source/llvm/lib/Target/AArch64 -I lib/Target/AArch64 -I include -I /build/source/llvm/include -I lib/Target/AArch64/AsmParser/.. -I /build/source/llvm/lib/Target/AArch64/AsmParser/.. 
-D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1670191760 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-12-05-012027-15999-1 -x c++ /build/source/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCExpr.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCLinkerOptimizationHint.h"
32#include "llvm/MC/MCObjectFileInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37#include "llvm/MC/MCParser/MCTargetAsmParser.h"
38#include "llvm/MC/MCRegisterInfo.h"
39#include "llvm/MC/MCStreamer.h"
40#include "llvm/MC/MCSubtargetInfo.h"
41#include "llvm/MC/MCSymbol.h"
42#include "llvm/MC/MCTargetOptions.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/MC/SubtargetFeature.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/Casting.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/AArch64TargetParser.h"
52#include "llvm/Support/TargetParser.h"
53#include "llvm/Support/raw_ostream.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
// File-scope using-directive; acceptable here because this is a .cpp file,
// not a header.
using namespace llvm;
65
66namespace {
67
/// Classifies which register file a parsed register name belongs to, so the
/// parser can distinguish the several AArch64 register namespaces.
enum class RegKind {
  Scalar,                // scalar (non-vector) register
  NeonVector,            // NEON vector register
  SVEDataVector,         // SVE data vector register
  SVEPredicateAsCounter, // SVE predicate register used as a counter
  SVEPredicateVector,    // SVE predicate vector register
  Matrix,                // matrix (ZA) register — see MatrixKind below
  LookupTable            // lookup-table register (see tryParseZTOperand)
};
77
/// Flavours of a matrix register operand: the whole array, a single tile,
/// or a row/column slice of a tile.
enum class MatrixKind { Array, Tile, Row, Col };
79
// How a parsed register must relate to the register the instruction encodes:
// exactly equal, or equal to its super-/sub-register (e.g. for the
// GPR64as32 / GPR32as64 cases documented on RegOp::EqualityTy below).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
/// AArch64AsmParser - Target assembly parser for AArch64. Parses
/// instructions, operands and target directives, and emits MCInsts through
/// the streamer supplied at match time.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases created via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  /// Records a pending SVE MOVPRFX prefix instruction so that the
  /// instruction following it can be validated against the prefix's
  /// destination/predicate registers.
  class PrefixInfo {
  public:
    /// Build a PrefixInfo from \p Inst. Only the MOVPRFX opcodes produce an
    /// active prefix; any other opcode yields an inactive (default) one.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated prefix: only the destination is recorded.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Predicated prefix; the governing predicate is operand 2 here.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Predicated prefix; the governing predicate is operand 1 here.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    // ElementSize/Pg are only meaningful for the predicated MOVPRFX forms,
    // hence the asserts below.
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize; // only valid when Predicated
    unsigned Dst;         // only valid when Active
    unsigned Pg;          // only valid when Predicated
  } NextPrefix;

  // NOTE(review): MCStreamer::getTargetStreamer() can return null (the
  // constructor below installs one only when missing). This dereference
  // assumes a target streamer is always present by the time it is called;
  // the analyzer reports a null dereference of this kind at line 7930 —
  // verify against the full file.
  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  /// Location of the token currently being lexed.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  // Operand/instruction parsing helpers.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  // Directive parsers.
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  // Windows SEH unwind directive parsers.
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;

/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // "tryParse" operand parsers: return NoMatch / Success / Failure so the
  // auto-generated matcher can try alternatives.
  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseRPRFMOperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  OperandMatchResultTy tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  OperandMatchResultTy tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEVecLenSpecifier(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
  OperandMatchResultTy tryParseImmRange(OperandVector &Operands);

public:
  // Target-specific match-result codes, extending the generic ones.
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32; ///< True when targeting the GNU ILP32 environment.

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists; getTargetStreamer() above relies on
    // one being installed.
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
333
334/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
335/// instruction.
336class AArch64Operand : public MCParsedAsmOperand {
337private:
  // Discriminator for the payload union below: exactly one union member is
  // active at a time, selected by Kind (see the copy constructor and the
  // asserting accessors).
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source locations of the operand's first and last tokens.
  SMLoc StartLoc, EndLoc;
361
  // Token operand: a raw pointer/length pair (StringRef-style) into text
  // that outlives the operand.
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // Bitmask of matrix tiles named in a tile list.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;      // first register of the list
    unsigned Count;       // number of registers in the list
    unsigned Stride;      // register stride between list elements
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Inclusive immediate range (e.g. for SME multi-vector forms —
  // see tryParseImmRange).
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // System register: spelled name plus its MRS/MSR/pstate encodings.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };
489
  // Payload storage: the member selected by Kind is the only valid one.
  // All members are aggregates of scalars/pointers, so plain member
  // assignment in the copy constructor is sufficient.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  // Constructs an operand of kind K; the payload is left for the caller
  // (the Create* factories) to fill in.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
518
  // Copy constructor: because the payload is a union, only the member
  // selected by o.Kind is copied; the switch covers every KindTy value.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
583
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // The typed accessors below assert that Kind matches before reading the
  // corresponding union member.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }
628
  // Reconstitute the parsed FP immediate from its stored 64-bit pattern.
  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }
653
  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  // Note: guarded by the isMatrixTileList() predicate rather than a direct
  // Kind comparison, unlike the other accessors.
  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }
703
704 StringRef getSysReg() const {
705 assert(Kind == k_SysReg && "Invalid access!")(static_cast <bool> (Kind == k_SysReg && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 705
, __extension__ __PRETTY_FUNCTION__))
;
706 return StringRef(SysReg.Data, SysReg.Length);
707 }
708
709 unsigned getSysCR() const {
710 assert(Kind == k_SysCR && "Invalid access!")(static_cast <bool> (Kind == k_SysCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 710
, __extension__ __PRETTY_FUNCTION__))
;
711 return SysCRImm.Val;
712 }
713
714 unsigned getPrefetch() const {
715 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 715
, __extension__ __PRETTY_FUNCTION__))
;
716 return Prefetch.Val;
717 }
718
719 unsigned getPSBHint() const {
720 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 720
, __extension__ __PRETTY_FUNCTION__))
;
721 return PSBHint.Val;
722 }
723
724 StringRef getPSBHintName() const {
725 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 725
, __extension__ __PRETTY_FUNCTION__))
;
726 return StringRef(PSBHint.Data, PSBHint.Length);
727 }
728
729 unsigned getBTIHint() const {
730 assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 730
, __extension__ __PRETTY_FUNCTION__))
;
731 return BTIHint.Val;
732 }
733
734 StringRef getBTIHintName() const {
735 assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 735
, __extension__ __PRETTY_FUNCTION__))
;
736 return StringRef(BTIHint.Data, BTIHint.Length);
737 }
738
739 StringRef getSVCR() const {
740 assert(Kind == k_SVCR && "Invalid access!")(static_cast <bool> (Kind == k_SVCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SVCR && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 740
, __extension__ __PRETTY_FUNCTION__))
;
741 return StringRef(SVCR.Data, SVCR.Length);
742 }
743
744 StringRef getPrefetchName() const {
745 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 745
, __extension__ __PRETTY_FUNCTION__))
;
746 return StringRef(Prefetch.Data, Prefetch.Length);
747 }
748
749 AArch64_AM::ShiftExtendType getShiftExtendType() const {
750 if (Kind == k_ShiftExtend)
751 return ShiftExtend.Type;
752 if (Kind == k_Register)
753 return Reg.ShiftExtend.Type;
754 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 754)
;
755 }
756
757 unsigned getShiftExtendAmount() const {
758 if (Kind == k_ShiftExtend)
759 return ShiftExtend.Amount;
760 if (Kind == k_Register)
761 return Reg.ShiftExtend.Amount;
762 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 762)
;
763 }
764
765 bool hasShiftExtendAmount() const {
766 if (Kind == k_ShiftExtend)
767 return ShiftExtend.HasExplicitAmount;
768 if (Kind == k_Register)
769 return Reg.ShiftExtend.HasExplicitAmount;
770 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 770)
;
771 }
772
773 bool isImm() const override { return Kind == k_Immediate; }
774 bool isMem() const override { return false; }
775
776 bool isUImm6() const {
777 if (!isImm())
778 return false;
779 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
780 if (!MCE)
781 return false;
782 int64_t Val = MCE->getValue();
783 return (Val >= 0 && Val < 64);
784 }
785
786 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
787
788 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
789 return isImmScaled<Bits, Scale>(true);
790 }
791
792 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
793 DiagnosticPredicate isUImmScaled() const {
794 if (IsRange && isImmRange() &&
795 (getLastImmVal() != getFirstImmVal() + Offset))
796 return DiagnosticPredicateTy::NoMatch;
797
798 return isImmScaled<Bits, Scale, IsRange>(false);
799 }
800
801 template <int Bits, int Scale, bool IsRange = false>
802 DiagnosticPredicate isImmScaled(bool Signed) const {
803 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
804 (isImmRange() && !IsRange))
805 return DiagnosticPredicateTy::NoMatch;
806
807 int64_t Val;
808 if (isImmRange())
809 Val = getFirstImmVal();
810 else {
811 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
812 if (!MCE)
813 return DiagnosticPredicateTy::NoMatch;
814 Val = MCE->getValue();
815 }
816
817 int64_t MinVal, MaxVal;
818 if (Signed) {
819 int64_t Shift = Bits - 1;
820 MinVal = (int64_t(1) << Shift) * -Scale;
821 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
822 } else {
823 MinVal = 0;
824 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
825 }
826
827 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
828 return DiagnosticPredicateTy::Match;
829
830 return DiagnosticPredicateTy::NearMatch;
831 }
832
833 DiagnosticPredicate isSVEPattern() const {
834 if (!isImm())
835 return DiagnosticPredicateTy::NoMatch;
836 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
837 if (!MCE)
838 return DiagnosticPredicateTy::NoMatch;
839 int64_t Val = MCE->getValue();
840 if (Val >= 0 && Val < 32)
841 return DiagnosticPredicateTy::Match;
842 return DiagnosticPredicateTy::NearMatch;
843 }
844
845 DiagnosticPredicate isSVEVecLenSpecifier() const {
846 if (!isImm())
847 return DiagnosticPredicateTy::NoMatch;
848 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
849 if (!MCE)
850 return DiagnosticPredicateTy::NoMatch;
851 int64_t Val = MCE->getValue();
852 if (Val >= 0 && Val <= 1)
853 return DiagnosticPredicateTy::Match;
854 return DiagnosticPredicateTy::NearMatch;
855 }
856
857 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
858 AArch64MCExpr::VariantKind ELFRefKind;
859 MCSymbolRefExpr::VariantKind DarwinRefKind;
860 int64_t Addend;
861 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
862 Addend)) {
863 // If we don't understand the expression, assume the best and
864 // let the fixup and relocation code deal with it.
865 return true;
866 }
867
868 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
869 ELFRefKind == AArch64MCExpr::VK_LO12 ||
870 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
871 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
872 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
873 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
874 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
875 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
876 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
877 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
878 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
879 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
880 // Note that we don't range-check the addend. It's adjusted modulo page
881 // size when converted, so there is no "out of range" condition when using
882 // @pageoff.
883 return true;
884 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
885 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
886 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
887 return Addend == 0;
888 }
889
890 return false;
891 }
892
893 template <int Scale> bool isUImm12Offset() const {
894 if (!isImm())
895 return false;
896
897 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
898 if (!MCE)
899 return isSymbolicUImm12Offset(getImm());
900
901 int64_t Val = MCE->getValue();
902 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
903 }
904
905 template <int N, int M>
906 bool isImmInRange() const {
907 if (!isImm())
908 return false;
909 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
910 if (!MCE)
911 return false;
912 int64_t Val = MCE->getValue();
913 return (Val >= N && Val <= M);
914 }
915
916 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
917 // a logical immediate can always be represented when inverted.
918 template <typename T>
919 bool isLogicalImm() const {
920 if (!isImm())
921 return false;
922 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
923 if (!MCE)
924 return false;
925
926 int64_t Val = MCE->getValue();
927 // Avoid left shift by 64 directly.
928 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
929 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
930 if ((Val & Upper) && (Val & Upper) != Upper)
931 return false;
932
933 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
934 }
935
936 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
937
938 bool isImmRange() const { return Kind == k_ImmRange; }
939
940 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
941 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
942 /// immediate that can be shifted by 'Shift'.
943 template <unsigned Width>
944 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
945 if (isShiftedImm() && Width == getShiftedImmShift())
946 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
947 return std::make_pair(CE->getValue(), Width);
948
949 if (isImm())
950 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
951 int64_t Val = CE->getValue();
952 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
953 return std::make_pair(Val >> Width, Width);
954 else
955 return std::make_pair(Val, 0u);
956 }
957
958 return {};
959 }
960
961 bool isAddSubImm() const {
962 if (!isShiftedImm() && !isImm())
963 return false;
964
965 const MCExpr *Expr;
966
967 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
968 if (isShiftedImm()) {
969 unsigned Shift = ShiftedImm.ShiftAmount;
970 Expr = ShiftedImm.Val;
971 if (Shift != 0 && Shift != 12)
972 return false;
973 } else {
974 Expr = getImm();
975 }
976
977 AArch64MCExpr::VariantKind ELFRefKind;
978 MCSymbolRefExpr::VariantKind DarwinRefKind;
979 int64_t Addend;
980 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
981 DarwinRefKind, Addend)) {
982 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
983 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
984 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
985 || ELFRefKind == AArch64MCExpr::VK_LO12
986 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
987 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
988 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
989 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
990 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
991 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
992 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
993 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
994 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
995 }
996
997 // If it's a constant, it should be a real immediate in range.
998 if (auto ShiftedVal = getShiftedVal<12>())
999 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1000
1001 // If it's an expression, we hope for the best and let the fixup/relocation
1002 // code deal with it.
1003 return true;
1004 }
1005
1006 bool isAddSubImmNeg() const {
1007 if (!isShiftedImm() && !isImm())
1008 return false;
1009
1010 // Otherwise it should be a real negative immediate in range.
1011 if (auto ShiftedVal = getShiftedVal<12>())
1012 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1013
1014 return false;
1015 }
1016
1017 // Signed value in the range -128 to +127. For element widths of
1018 // 16 bits or higher it may also be a signed multiple of 256 in the
1019 // range -32768 to +32512.
1020 // For element-width of 8 bits a range of -128 to 255 is accepted,
1021 // since a copy of a byte can be either signed/unsigned.
1022 template <typename T>
1023 DiagnosticPredicate isSVECpyImm() const {
1024 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1025 return DiagnosticPredicateTy::NoMatch;
1026
1027 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1028 std::is_same<int8_t, T>::value;
1029 if (auto ShiftedImm = getShiftedVal<8>())
1030 if (!(IsByte && ShiftedImm->second) &&
1031 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1032 << ShiftedImm->second))
1033 return DiagnosticPredicateTy::Match;
1034
1035 return DiagnosticPredicateTy::NearMatch;
1036 }
1037
1038 // Unsigned value in the range 0 to 255. For element widths of
1039 // 16 bits or higher it may also be a signed multiple of 256 in the
1040 // range 0 to 65280.
1041 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1042 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1043 return DiagnosticPredicateTy::NoMatch;
1044
1045 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1046 std::is_same<int8_t, T>::value;
1047 if (auto ShiftedImm = getShiftedVal<8>())
1048 if (!(IsByte && ShiftedImm->second) &&
1049 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1050 << ShiftedImm->second))
1051 return DiagnosticPredicateTy::Match;
1052
1053 return DiagnosticPredicateTy::NearMatch;
1054 }
1055
1056 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1057 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1058 return DiagnosticPredicateTy::Match;
1059 return DiagnosticPredicateTy::NoMatch;
1060 }
1061
1062 bool isCondCode() const { return Kind == k_CondCode; }
1063
1064 bool isSIMDImmType10() const {
1065 if (!isImm())
1066 return false;
1067 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1068 if (!MCE)
1069 return false;
1070 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1071 }
1072
1073 template<int N>
1074 bool isBranchTarget() const {
1075 if (!isImm())
1076 return false;
1077 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1078 if (!MCE)
1079 return true;
1080 int64_t Val = MCE->getValue();
1081 if (Val & 0x3)
1082 return false;
1083 assert(N > 0 && "Branch target immediate cannot be 0 bits!")(static_cast <bool> (N > 0 && "Branch target immediate cannot be 0 bits!"
) ? void (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1083
, __extension__ __PRETTY_FUNCTION__))
;
1084 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1085 }
1086
1087 bool
1088 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1089 if (!isImm())
1090 return false;
1091
1092 AArch64MCExpr::VariantKind ELFRefKind;
1093 MCSymbolRefExpr::VariantKind DarwinRefKind;
1094 int64_t Addend;
1095 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1096 DarwinRefKind, Addend)) {
1097 return false;
1098 }
1099 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1100 return false;
1101
1102 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1103 }
1104
1105 bool isMovWSymbolG3() const {
1106 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1107 }
1108
1109 bool isMovWSymbolG2() const {
1110 return isMovWSymbol(
1111 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1112 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1113 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1114 AArch64MCExpr::VK_DTPREL_G2});
1115 }
1116
1117 bool isMovWSymbolG1() const {
1118 return isMovWSymbol(
1119 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1120 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1121 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1122 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1123 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1124 }
1125
1126 bool isMovWSymbolG0() const {
1127 return isMovWSymbol(
1128 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1129 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1130 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1131 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1132 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1133 }
1134
1135 template<int RegWidth, int Shift>
1136 bool isMOVZMovAlias() const {
1137 if (!isImm()) return false;
1138
1139 const MCExpr *E = getImm();
1140 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1141 uint64_t Value = CE->getValue();
1142
1143 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1144 }
1145 // Only supports the case of Shift being 0 if an expression is used as an
1146 // operand
1147 return !Shift && E;
1148 }
1149
1150 template<int RegWidth, int Shift>
1151 bool isMOVNMovAlias() const {
1152 if (!isImm()) return false;
1153
1154 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1155 if (!CE) return false;
1156 uint64_t Value = CE->getValue();
1157
1158 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1159 }
1160
1161 bool isFPImm() const {
1162 return Kind == k_FPImm &&
1163 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1164 }
1165
1166 bool isBarrier() const {
1167 return Kind == k_Barrier && !getBarriernXSModifier();
1168 }
1169 bool isBarriernXS() const {
1170 return Kind == k_Barrier && getBarriernXSModifier();
1171 }
1172 bool isSysReg() const { return Kind == k_SysReg; }
1173
1174 bool isMRSSystemRegister() const {
1175 if (!isSysReg()) return false;
1176
1177 return SysReg.MRSReg != -1U;
1178 }
1179
1180 bool isMSRSystemRegister() const {
1181 if (!isSysReg()) return false;
1182 return SysReg.MSRReg != -1U;
1183 }
1184
1185 bool isSystemPStateFieldWithImm0_1() const {
1186 if (!isSysReg()) return false;
1187 return (SysReg.PStateField == AArch64PState::PAN ||
1188 SysReg.PStateField == AArch64PState::DIT ||
1189 SysReg.PStateField == AArch64PState::UAO ||
1190 SysReg.PStateField == AArch64PState::SSBS);
1191 }
1192
1193 bool isSystemPStateFieldWithImm0_15() const {
1194 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1195 return SysReg.PStateField != -1U;
1196 }
1197
1198 bool isSVCR() const {
1199 if (Kind != k_SVCR)
1200 return false;
1201 return SVCR.PStateField != -1U;
1202 }
1203
1204 bool isReg() const override {
1205 return Kind == k_Register;
1206 }
1207
1208 bool isVectorList() const { return Kind == k_VectorList; }
1209
1210 bool isScalarReg() const {
1211 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1212 }
1213
1214 bool isNeonVectorReg() const {
1215 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1216 }
1217
1218 bool isNeonVectorRegLo() const {
1219 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1220 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1221 Reg.RegNum) ||
1222 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1223 Reg.RegNum));
1224 }
1225
1226 bool isMatrix() const { return Kind == k_MatrixRegister; }
1227 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1228
1229 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1230 RegKind RK;
1231 switch (Class) {
1232 case AArch64::PPRRegClassID:
1233 case AArch64::PPR_3bRegClassID:
1234 case AArch64::PPR_p8to15RegClassID:
1235 RK = RegKind::SVEPredicateAsCounter;
1236 break;
1237 default:
1238 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1238
)
;
1239 }
1240
1241 return (Kind == k_Register && Reg.Kind == RK) &&
1242 AArch64MCRegisterClasses[Class].contains(getReg());
1243 }
1244
1245 template <unsigned Class> bool isSVEVectorReg() const {
1246 RegKind RK;
1247 switch (Class) {
1248 case AArch64::ZPRRegClassID:
1249 case AArch64::ZPR_3bRegClassID:
1250 case AArch64::ZPR_4bRegClassID:
1251 RK = RegKind::SVEDataVector;
1252 break;
1253 case AArch64::PPRRegClassID:
1254 case AArch64::PPR_3bRegClassID:
1255 RK = RegKind::SVEPredicateVector;
1256 break;
1257 default:
1258 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1258
)
;
1259 }
1260
1261 return (Kind == k_Register && Reg.Kind == RK) &&
1262 AArch64MCRegisterClasses[Class].contains(getReg());
1263 }
1264
1265 template <unsigned Class> bool isFPRasZPR() const {
1266 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1267 AArch64MCRegisterClasses[Class].contains(getReg());
1268 }
1269
1270 template <int ElementWidth, unsigned Class>
1271 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1272 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1273 return DiagnosticPredicateTy::NoMatch;
1274
1275 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1276 return DiagnosticPredicateTy::Match;
1277
1278 return DiagnosticPredicateTy::NearMatch;
1279 }
1280
1281 template <int ElementWidth, unsigned Class>
1282 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1283 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1284 return DiagnosticPredicateTy::NoMatch;
1285
1286 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1287 return DiagnosticPredicateTy::Match;
1288
1289 return DiagnosticPredicateTy::NearMatch;
1290 }
1291
1292 template <int ElementWidth, unsigned Class>
1293 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1294 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1295 return DiagnosticPredicateTy::NoMatch;
1296
1297 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1298 return DiagnosticPredicateTy::Match;
1299
1300 return DiagnosticPredicateTy::NearMatch;
1301 }
1302
1303 template <int ElementWidth, unsigned Class,
1304 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1305 bool ShiftWidthAlwaysSame>
1306 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1307 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1308 if (!VectorMatch.isMatch())
1309 return DiagnosticPredicateTy::NoMatch;
1310
1311 // Give a more specific diagnostic when the user has explicitly typed in
1312 // a shift-amount that does not match what is expected, but for which
1313 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1314 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1315 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1316 ShiftExtendTy == AArch64_AM::SXTW) &&
1317 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1318 return DiagnosticPredicateTy::NoMatch;
1319
1320 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1321 return DiagnosticPredicateTy::Match;
1322
1323 return DiagnosticPredicateTy::NearMatch;
1324 }
1325
1326 bool isGPR32as64() const {
1327 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1328 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1329 }
1330
1331 bool isGPR64as32() const {
1332 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1333 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1334 }
1335
1336 bool isGPR64x8() const {
1337 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1338 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1339 Reg.RegNum);
1340 }
1341
1342 bool isWSeqPair() const {
1343 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1344 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1345 Reg.RegNum);
1346 }
1347
1348 bool isXSeqPair() const {
1349 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1350 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1351 Reg.RegNum);
1352 }
1353
1354 bool isSyspXzrPair() const {
1355 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1356 }
1357
1358 template<int64_t Angle, int64_t Remainder>
1359 DiagnosticPredicate isComplexRotation() const {
1360 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1361
1362 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1363 if (!CE) return DiagnosticPredicateTy::NoMatch;
1364 uint64_t Value = CE->getValue();
1365
1366 if (Value % Angle == Remainder && Value <= 270)
1367 return DiagnosticPredicateTy::Match;
1368 return DiagnosticPredicateTy::NearMatch;
1369 }
1370
1371 template <unsigned RegClassID> bool isGPR64() const {
1372 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1373 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1374 }
1375
1376 template <unsigned RegClassID, int ExtWidth>
1377 DiagnosticPredicate isGPR64WithShiftExtend() const {
1378 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1379 return DiagnosticPredicateTy::NoMatch;
1380
1381 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1382 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1383 return DiagnosticPredicateTy::Match;
1384 return DiagnosticPredicateTy::NearMatch;
1385 }
1386
1387 /// Is this a vector list with the type implicit (presumably attached to the
1388 /// instruction itself)?
1389 template <RegKind VectorKind, unsigned NumRegs>
1390 bool isImplicitlyTypedVectorList() const {
1391 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1392 VectorList.NumElements == 0 &&
1393 VectorList.RegisterKind == VectorKind;
1394 }
1395
1396 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1397 unsigned ElementWidth, unsigned Stride = 1>
1398 bool isTypedVectorList() const {
1399 if (Kind != k_VectorList)
1400 return false;
1401 if (VectorList.Count != NumRegs)
1402 return false;
1403 if (VectorList.RegisterKind != VectorKind)
1404 return false;
1405 if (VectorList.ElementWidth != ElementWidth)
1406 return false;
1407 if (VectorList.Stride != Stride)
1408 return false;
1409 return VectorList.NumElements == NumElements;
1410 }
1411
1412 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1413 unsigned ElementWidth>
1414 DiagnosticPredicate isTypedVectorListMultiple() const {
1415 bool Res =
1416 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1417 if (!Res)
1418 return DiagnosticPredicateTy::NoMatch;
1419 if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
1420 return DiagnosticPredicateTy::NearMatch;
1421 return DiagnosticPredicateTy::Match;
1422 }
1423
1424 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1425 unsigned ElementWidth>
1426 DiagnosticPredicate isTypedVectorListStrided() const {
1427 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1428 ElementWidth, Stride>();
1429 if (!Res)
1430 return DiagnosticPredicateTy::NoMatch;
1431 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1432 ((VectorList.RegNum >= AArch64::Z16) &&
1433 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1434 return DiagnosticPredicateTy::Match;
1435 return DiagnosticPredicateTy::NoMatch;
1436 }
1437
1438 template <int Min, int Max>
1439 DiagnosticPredicate isVectorIndex() const {
1440 if (Kind != k_VectorIndex)
1441 return DiagnosticPredicateTy::NoMatch;
1442 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1443 return DiagnosticPredicateTy::Match;
1444 return DiagnosticPredicateTy::NearMatch;
1445 }
1446
1447 bool isToken() const override { return Kind == k_Token; }
1448
1449 bool isTokenEqual(StringRef Str) const {
1450 return Kind == k_Token && getToken() == Str;
1451 }
1452 bool isSysCR() const { return Kind == k_SysCR; }
1453 bool isPrefetch() const { return Kind == k_Prefetch; }
1454 bool isPSBHint() const { return Kind == k_PSBHint; }
1455 bool isBTIHint() const { return Kind == k_BTIHint; }
1456 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1457 bool isShifter() const {
1458 if (!isShiftExtend())
1459 return false;
1460
1461 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1462 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1463 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1464 ST == AArch64_AM::MSL);
1465 }
1466
1467 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1468 if (Kind != k_FPImm)
1469 return DiagnosticPredicateTy::NoMatch;
1470
1471 if (getFPImmIsExact()) {
1472 // Lookup the immediate from table of supported immediates.
1473 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1474 assert(Desc && "Unknown enum value")(static_cast <bool> (Desc && "Unknown enum value"
) ? void (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1474
, __extension__ __PRETTY_FUNCTION__))
;
1475
1476 // Calculate its FP value.
1477 APFloat RealVal(APFloat::IEEEdouble());
1478 auto StatusOrErr =
1479 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1480 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1481 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1481
)
;
1482
1483 if (getFPImm().bitwiseIsEqual(RealVal))
1484 return DiagnosticPredicateTy::Match;
1485 }
1486
1487 return DiagnosticPredicateTy::NearMatch;
1488 }
1489
1490 template <unsigned ImmA, unsigned ImmB>
1491 DiagnosticPredicate isExactFPImm() const {
1492 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1493 if ((Res = isExactFPImm<ImmA>()))
1494 return DiagnosticPredicateTy::Match;
1495 if ((Res = isExactFPImm<ImmB>()))
1496 return DiagnosticPredicateTy::Match;
1497 return Res;
1498 }
1499
1500 bool isExtend() const {
1501 if (!isShiftExtend())
1502 return false;
1503
1504 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1505 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1506 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1507 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1508 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1509 ET == AArch64_AM::LSL) &&
1510 getShiftExtendAmount() <= 4;
1511 }
1512
1513 bool isExtend64() const {
1514 if (!isExtend())
1515 return false;
1516 // Make sure the extend expects a 32-bit source register.
1517 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1518 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1519 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1520 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1521 }
1522
1523 bool isExtendLSL64() const {
1524 if (!isExtend())
1525 return false;
1526 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1527 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1528 ET == AArch64_AM::LSL) &&
1529 getShiftExtendAmount() <= 4;
1530 }
1531
1532 template<int Width> bool isMemXExtend() const {
1533 if (!isExtend())
1534 return false;
1535 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1536 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1537 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1538 getShiftExtendAmount() == 0);
1539 }
1540
1541 template<int Width> bool isMemWExtend() const {
1542 if (!isExtend())
1543 return false;
1544 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1545 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1546 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1547 getShiftExtendAmount() == 0);
1548 }
1549
1550 template <unsigned width>
1551 bool isArithmeticShifter() const {
1552 if (!isShifter())
1553 return false;
1554
1555 // An arithmetic shifter is LSL, LSR, or ASR.
1556 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1557 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1558 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1559 }
1560
1561 template <unsigned width>
1562 bool isLogicalShifter() const {
1563 if (!isShifter())
1564 return false;
1565
1566 // A logical shifter is LSL, LSR, ASR or ROR.
1567 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1568 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1569 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1570 getShiftExtendAmount() < width;
1571 }
1572
1573 bool isMovImm32Shifter() const {
1574 if (!isShifter())
1575 return false;
1576
1577 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1578 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1579 if (ST != AArch64_AM::LSL)
1580 return false;
1581 uint64_t Val = getShiftExtendAmount();
1582 return (Val == 0 || Val == 16);
1583 }
1584
1585 bool isMovImm64Shifter() const {
1586 if (!isShifter())
1587 return false;
1588
1589 // A MOVi shifter is LSL of 0 or 16.
1590 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1591 if (ST != AArch64_AM::LSL)
1592 return false;
1593 uint64_t Val = getShiftExtendAmount();
1594 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1595 }
1596
1597 bool isLogicalVecShifter() const {
1598 if (!isShifter())
1599 return false;
1600
1601 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1602 unsigned Shift = getShiftExtendAmount();
1603 return getShiftExtendType() == AArch64_AM::LSL &&
1604 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1605 }
1606
1607 bool isLogicalVecHalfWordShifter() const {
1608 if (!isLogicalVecShifter())
1609 return false;
1610
1611 // A logical vector shifter is a left shift by 0 or 8.
1612 unsigned Shift = getShiftExtendAmount();
1613 return getShiftExtendType() == AArch64_AM::LSL &&
1614 (Shift == 0 || Shift == 8);
1615 }
1616
1617 bool isMoveVecShifter() const {
1618 if (!isShiftExtend())
1619 return false;
1620
1621 // A logical vector shifter is a left shift by 8 or 16.
1622 unsigned Shift = getShiftExtendAmount();
1623 return getShiftExtendType() == AArch64_AM::MSL &&
1624 (Shift == 8 || Shift == 16);
1625 }
1626
1627 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1628 // to LDUR/STUR when the offset is not legal for the former but is for
1629 // the latter. As such, in addition to checking for being a legal unscaled
1630 // address, also check that it is not a legal scaled address. This avoids
1631 // ambiguity in the matcher.
1632 template<int Width>
1633 bool isSImm9OffsetFB() const {
1634 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1635 }
1636
1637 bool isAdrpLabel() const {
1638 // Validation was handled during parsing, so we just verify that
1639 // something didn't go haywire.
1640 if (!isImm())
1641 return false;
1642
1643 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1644 int64_t Val = CE->getValue();
1645 int64_t Min = - (4096 * (1LL << (21 - 1)));
1646 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1647 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1648 }
1649
1650 return true;
1651 }
1652
1653 bool isAdrLabel() const {
1654 // Validation was handled during parsing, so we just verify that
1655 // something didn't go haywire.
1656 if (!isImm())
1657 return false;
1658
1659 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1660 int64_t Val = CE->getValue();
1661 int64_t Min = - (1LL << (21 - 1));
1662 int64_t Max = ((1LL << (21 - 1)) - 1);
1663 return Val >= Min && Val <= Max;
1664 }
1665
1666 return true;
1667 }
1668
1669 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1670 DiagnosticPredicate isMatrixRegOperand() const {
1671 if (!isMatrix())
1672 return DiagnosticPredicateTy::NoMatch;
1673 if (getMatrixKind() != Kind ||
1674 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1675 EltSize != getMatrixElementWidth())
1676 return DiagnosticPredicateTy::NearMatch;
1677 return DiagnosticPredicateTy::Match;
1678 }
1679
1680 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1681 // Add as immediates when possible. Null MCExpr = 0.
1682 if (!Expr)
1683 Inst.addOperand(MCOperand::createImm(0));
1684 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1685 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1686 else
1687 Inst.addOperand(MCOperand::createExpr(Expr));
1688 }
1689
1690 void addRegOperands(MCInst &Inst, unsigned N) const {
1691 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1691
, __extension__ __PRETTY_FUNCTION__))
;
1692 Inst.addOperand(MCOperand::createReg(getReg()));
1693 }
1694
1695 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1696 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1696
, __extension__ __PRETTY_FUNCTION__))
;
1697 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1698 }
1699
1700 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1701 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1701
, __extension__ __PRETTY_FUNCTION__))
;
1702 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1703
, __extension__ __PRETTY_FUNCTION__))
1703 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1703
, __extension__ __PRETTY_FUNCTION__))
;
1704
1705 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1706 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1707 RI->getEncodingValue(getReg()));
1708
1709 Inst.addOperand(MCOperand::createReg(Reg));
1710 }
1711
1712 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1713 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1713
, __extension__ __PRETTY_FUNCTION__))
;
1714 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1715
, __extension__ __PRETTY_FUNCTION__))
1715 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1715
, __extension__ __PRETTY_FUNCTION__))
;
1716
1717 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1718 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1719 RI->getEncodingValue(getReg()));
1720
1721 Inst.addOperand(MCOperand::createReg(Reg));
1722 }
1723
1724 template <int Width>
1725 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1726 unsigned Base;
1727 switch (Width) {
1728 case 8: Base = AArch64::B0; break;
1729 case 16: Base = AArch64::H0; break;
1730 case 32: Base = AArch64::S0; break;
1731 case 64: Base = AArch64::D0; break;
1732 case 128: Base = AArch64::Q0; break;
1733 default:
1734 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1734)
;
1735 }
1736 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1737 }
1738
1739 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1740 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1740
, __extension__ __PRETTY_FUNCTION__))
;
1741 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1742
, __extension__ __PRETTY_FUNCTION__))
1742 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1742
, __extension__ __PRETTY_FUNCTION__))
;
1743 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1744 }
1745
1746 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1747 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1747
, __extension__ __PRETTY_FUNCTION__))
;
1748 assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1749
, __extension__ __PRETTY_FUNCTION__))
1749 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64::
FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail
("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1749
, __extension__ __PRETTY_FUNCTION__))
;
1750 Inst.addOperand(MCOperand::createReg(getReg()));
1751 }
1752
1753 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1754 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1754
, __extension__ __PRETTY_FUNCTION__))
;
1755 Inst.addOperand(MCOperand::createReg(getReg()));
1756 }
1757
// Selects the register-file row used when emitting vector-list operands.
enum VecListIndexType {
  VecListIdx_DReg = 0,
  VecListIdx_QReg = 1,
  VecListIdx_ZReg = 2,
  VecListIdx_PReg = 3,
};
1764
1765 template <VecListIndexType RegTy, unsigned NumRegs>
1766 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1767 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1767
, __extension__ __PRETTY_FUNCTION__))
;
1768 static const unsigned FirstRegs[][5] = {
1769 /* DReg */ { AArch64::Q0,
1770 AArch64::D0, AArch64::D0_D1,
1771 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1772 /* QReg */ { AArch64::Q0,
1773 AArch64::Q0, AArch64::Q0_Q1,
1774 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1775 /* ZReg */ { AArch64::Z0,
1776 AArch64::Z0, AArch64::Z0_Z1,
1777 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1778 /* PReg */ { AArch64::P0,
1779 AArch64::P0, AArch64::P0_P1 }
1780 };
1781
1782 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs
<= 4) && " NumRegs must be <= 4 for ZRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1783
, __extension__ __PRETTY_FUNCTION__))
1783 " NumRegs must be <= 4 for ZRegs")(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs
<= 4) && " NumRegs must be <= 4 for ZRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1783
, __extension__ __PRETTY_FUNCTION__))
;
1784
1785 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&(static_cast <bool> ((RegTy != VecListIdx_PReg || NumRegs
<= 2) && " NumRegs must be <= 2 for PRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_PReg || NumRegs <= 2) && \" NumRegs must be <= 2 for PRegs\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1786
, __extension__ __PRETTY_FUNCTION__))
1786 " NumRegs must be <= 2 for PRegs")(static_cast <bool> ((RegTy != VecListIdx_PReg || NumRegs
<= 2) && " NumRegs must be <= 2 for PRegs") ? void
(0) : __assert_fail ("(RegTy != VecListIdx_PReg || NumRegs <= 2) && \" NumRegs must be <= 2 for PRegs\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1786
, __extension__ __PRETTY_FUNCTION__))
;
1787
1788 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1789 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1790 FirstRegs[(unsigned)RegTy][0]));
1791 }
1792
1793 template <unsigned NumRegs>
1794 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1795 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1795
, __extension__ __PRETTY_FUNCTION__))
;
1796 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4")(static_cast <bool> ((NumRegs == 2 || NumRegs == 4) &&
" NumRegs must be 2 or 4") ? void (0) : __assert_fail ("(NumRegs == 2 || NumRegs == 4) && \" NumRegs must be 2 or 4\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1796
, __extension__ __PRETTY_FUNCTION__))
;
1797
1798 switch (NumRegs) {
1799 case 2:
1800 if (getVectorListStart() < AArch64::Z16) {
1801 assert((getVectorListStart() < AArch64::Z8) &&(static_cast <bool> ((getVectorListStart() < AArch64
::Z8) && (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z8) && (getVectorListStart() >= AArch64::Z0) && \"Invalid Register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1802
, __extension__ __PRETTY_FUNCTION__))
1802 (getVectorListStart() >= AArch64::Z0) && "Invalid Register")(static_cast <bool> ((getVectorListStart() < AArch64
::Z8) && (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z8) && (getVectorListStart() >= AArch64::Z0) && \"Invalid Register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1802
, __extension__ __PRETTY_FUNCTION__))
;
1803 Inst.addOperand(MCOperand::createReg(
1804 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1805 } else {
1806 assert((getVectorListStart() < AArch64::Z24) &&(static_cast <bool> ((getVectorListStart() < AArch64
::Z24) && (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z24) && (getVectorListStart() >= AArch64::Z16) && \"Invalid Register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1807
, __extension__ __PRETTY_FUNCTION__))
1807 (getVectorListStart() >= AArch64::Z16) && "Invalid Register")(static_cast <bool> ((getVectorListStart() < AArch64
::Z24) && (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z24) && (getVectorListStart() >= AArch64::Z16) && \"Invalid Register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1807
, __extension__ __PRETTY_FUNCTION__))
;
1808 Inst.addOperand(MCOperand::createReg(
1809 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1810 }
1811 break;
1812 case 4:
1813 if (getVectorListStart() < AArch64::Z16) {
1814 assert((getVectorListStart() < AArch64::Z4) &&(static_cast <bool> ((getVectorListStart() < AArch64
::Z4) && (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z4) && (getVectorListStart() >= AArch64::Z0) && \"Invalid Register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1815
, __extension__ __PRETTY_FUNCTION__))
1815 (getVectorListStart() >= AArch64::Z0) && "Invalid Register")(static_cast <bool> ((getVectorListStart() < AArch64
::Z4) && (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z4) && (getVectorListStart() >= AArch64::Z0) && \"Invalid Register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1815
, __extension__ __PRETTY_FUNCTION__))
;
1816 Inst.addOperand(MCOperand::createReg(
1817 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1818 } else {
1819 assert((getVectorListStart() < AArch64::Z20) &&(static_cast <bool> ((getVectorListStart() < AArch64
::Z20) && (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z20) && (getVectorListStart() >= AArch64::Z16) && \"Invalid Register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1820
, __extension__ __PRETTY_FUNCTION__))
1820 (getVectorListStart() >= AArch64::Z16) && "Invalid Register")(static_cast <bool> ((getVectorListStart() < AArch64
::Z20) && (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z20) && (getVectorListStart() >= AArch64::Z16) && \"Invalid Register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1820
, __extension__ __PRETTY_FUNCTION__))
;
1821 Inst.addOperand(MCOperand::createReg(
1822 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1823 }
1824 break;
1825 default:
1826 llvm_unreachable("Unsupported number of registers for strided vec list")::llvm::llvm_unreachable_internal("Unsupported number of registers for strided vec list"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1826
)
;
1827 }
1828 }
1829
1830 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1831 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1831
, __extension__ __PRETTY_FUNCTION__))
;
1832 unsigned RegMask = getMatrixTileListRegMask();
1833 assert(RegMask <= 0xFF && "Invalid mask!")(static_cast <bool> (RegMask <= 0xFF && "Invalid mask!"
) ? void (0) : __assert_fail ("RegMask <= 0xFF && \"Invalid mask!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1833
, __extension__ __PRETTY_FUNCTION__))
;
1834 Inst.addOperand(MCOperand::createImm(RegMask));
1835 }
1836
1837 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1838 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1838
, __extension__ __PRETTY_FUNCTION__))
;
1839 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1840 }
1841
1842 template <unsigned ImmIs0, unsigned ImmIs1>
1843 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1844 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1844
, __extension__ __PRETTY_FUNCTION__))
;
1845 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")(static_cast <bool> (bool(isExactFPImm<ImmIs0, ImmIs1
>()) && "Invalid operand") ? void (0) : __assert_fail
("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1845
, __extension__ __PRETTY_FUNCTION__))
;
1846 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1847 }
1848
1849 void addImmOperands(MCInst &Inst, unsigned N) const {
1850 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1850
, __extension__ __PRETTY_FUNCTION__))
;
1851 // If this is a pageoff symrefexpr with an addend, adjust the addend
1852 // to be only the page-offset portion. Otherwise, just add the expr
1853 // as-is.
1854 addExpr(Inst, getImm());
1855 }
1856
1857 template <int Shift>
1858 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1859 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1859
, __extension__ __PRETTY_FUNCTION__))
;
1860 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1861 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1862 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1863 } else if (isShiftedImm()) {
1864 addExpr(Inst, getShiftedImmVal());
1865 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1866 } else {
1867 addExpr(Inst, getImm());
1868 Inst.addOperand(MCOperand::createImm(0));
1869 }
1870 }
1871
1872 template <int Shift>
1873 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1874 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1874
, __extension__ __PRETTY_FUNCTION__))
;
1875 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1876 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1877 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1878 } else
1879 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1879
)
;
1880 }
1881
1882 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1883 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1883
, __extension__ __PRETTY_FUNCTION__))
;
1884 Inst.addOperand(MCOperand::createImm(getCondCode()));
1885 }
1886
1887 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1888 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1888
, __extension__ __PRETTY_FUNCTION__))
;
1889 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1890 if (!MCE)
1891 addExpr(Inst, getImm());
1892 else
1893 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1894 }
1895
1896 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1897 addImmOperands(Inst, N);
1898 }
1899
1900 template<int Scale>
1901 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1902 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1902
, __extension__ __PRETTY_FUNCTION__))
;
1903 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1904
1905 if (!MCE) {
1906 Inst.addOperand(MCOperand::createExpr(getImm()));
1907 return;
1908 }
1909 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1910 }
1911
1912 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1913 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1913
, __extension__ __PRETTY_FUNCTION__))
;
1914 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1915 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1916 }
1917
1918 template <int Scale>
1919 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1920 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1920
, __extension__ __PRETTY_FUNCTION__))
;
1921 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1922 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1923 }
1924
1925 template <int Scale>
1926 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1927 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1927
, __extension__ __PRETTY_FUNCTION__))
;
1928 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
1929 }
1930
1931 template <typename T>
1932 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1933 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1933
, __extension__ __PRETTY_FUNCTION__))
;
1934 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1935 std::make_unsigned_t<T> Val = MCE->getValue();
1936 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1937 Inst.addOperand(MCOperand::createImm(encoding));
1938 }
1939
1940 template <typename T>
1941 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1942 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1942
, __extension__ __PRETTY_FUNCTION__))
;
1943 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1944 std::make_unsigned_t<T> Val = ~MCE->getValue();
1945 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1946 Inst.addOperand(MCOperand::createImm(encoding));
1947 }
1948
1949 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1950 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1950
, __extension__ __PRETTY_FUNCTION__))
;
1951 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1952 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1953 Inst.addOperand(MCOperand::createImm(encoding));
1954 }
1955
1956 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1957 // Branch operands don't encode the low bits, so shift them off
1958 // here. If it's a label, however, just put it on directly as there's
1959 // not enough information now to do anything.
1960 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1960
, __extension__ __PRETTY_FUNCTION__))
;
1961 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1962 if (!MCE) {
1963 addExpr(Inst, getImm());
1964 return;
1965 }
1966 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1966
, __extension__ __PRETTY_FUNCTION__))
;
1967 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1968 }
1969
1970 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1971 // Branch operands don't encode the low bits, so shift them off
1972 // here. If it's a label, however, just put it on directly as there's
1973 // not enough information now to do anything.
1974 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1974
, __extension__ __PRETTY_FUNCTION__))
;
1975 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1976 if (!MCE) {
1977 addExpr(Inst, getImm());
1978 return;
1979 }
1980 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1980
, __extension__ __PRETTY_FUNCTION__))
;
1981 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1982 }
1983
1984 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1985 // Branch operands don't encode the low bits, so shift them off
1986 // here. If it's a label, however, just put it on directly as there's
1987 // not enough information now to do anything.
1988 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1988
, __extension__ __PRETTY_FUNCTION__))
;
1989 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1990 if (!MCE) {
1991 addExpr(Inst, getImm());
1992 return;
1993 }
1994 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1994
, __extension__ __PRETTY_FUNCTION__))
;
1995 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1996 }
1997
1998 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1999 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1999
, __extension__ __PRETTY_FUNCTION__))
;
2000 Inst.addOperand(MCOperand::createImm(
2001 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2002 }
2003
2004 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2005 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2005
, __extension__ __PRETTY_FUNCTION__))
;
2006 Inst.addOperand(MCOperand::createImm(getBarrier()));
2007 }
2008
2009 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2010 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2010
, __extension__ __PRETTY_FUNCTION__))
;
2011 Inst.addOperand(MCOperand::createImm(getBarrier()));
2012 }
2013
2014 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2015 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2015
, __extension__ __PRETTY_FUNCTION__))
;
2016
2017 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2018 }
2019
2020 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2021 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2021
, __extension__ __PRETTY_FUNCTION__))
;
2022
2023 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2024 }
2025
2026 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2027 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2027
, __extension__ __PRETTY_FUNCTION__))
;
2028
2029 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2030 }
2031
2032 void addSVCROperands(MCInst &Inst, unsigned N) const {
2033 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2033
, __extension__ __PRETTY_FUNCTION__))
;
2034
2035 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2036 }
2037
2038 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2039 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2039
, __extension__ __PRETTY_FUNCTION__))
;
2040
2041 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2042 }
2043
2044 void addSysCROperands(MCInst &Inst, unsigned N) const {
2045 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2045
, __extension__ __PRETTY_FUNCTION__))
;
2046 Inst.addOperand(MCOperand::createImm(getSysCR()));
2047 }
2048
2049 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2050 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2050
, __extension__ __PRETTY_FUNCTION__))
;
2051 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2052 }
2053
2054 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2055 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2055
, __extension__ __PRETTY_FUNCTION__))
;
2056 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2057 }
2058
2059 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2060 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2060
, __extension__ __PRETTY_FUNCTION__))
;
2061 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2062 }
2063
2064 void addShifterOperands(MCInst &Inst, unsigned N) const {
2065 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2065
, __extension__ __PRETTY_FUNCTION__))
;
2066 unsigned Imm =
2067 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2068 Inst.addOperand(MCOperand::createImm(Imm));
2069 }
2070
2071 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2072 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2072
, __extension__ __PRETTY_FUNCTION__))
;
2073
2074 if (!isScalarReg())
2075 return;
2076
2077 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2078 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2079 .getRegister(RI->getEncodingValue(getReg()));
2080 if (Reg != AArch64::XZR)
2081 llvm_unreachable("wrong register")::llvm::llvm_unreachable_internal("wrong register", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2081)
;
2082
2083 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2084 }
2085
2086 void addExtendOperands(MCInst &Inst, unsigned N) const {
2087 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2087
, __extension__ __PRETTY_FUNCTION__))
;
2088 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2089 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2090 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2091 Inst.addOperand(MCOperand::createImm(Imm));
2092 }
2093
2094 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2095 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2095
, __extension__ __PRETTY_FUNCTION__))
;
2096 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2097 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2098 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2099 Inst.addOperand(MCOperand::createImm(Imm));
2100 }
2101
2102 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2103 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2103
, __extension__ __PRETTY_FUNCTION__))
;
2104 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2105 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2106 Inst.addOperand(MCOperand::createImm(IsSigned));
2107 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2108 }
2109
2110 // For 8-bit load/store instructions with a register offset, both the
2111 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2112 // they're disambiguated by whether the shift was explicit or implicit rather
2113 // than its size.
2114 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2115 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2115
, __extension__ __PRETTY_FUNCTION__))
;
2116 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2117 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2118 Inst.addOperand(MCOperand::createImm(IsSigned));
2119 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2120 }
2121
2122 template<int Shift>
2123 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2124 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2124
, __extension__ __PRETTY_FUNCTION__))
;
2125
2126 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2127 if (CE) {
2128 uint64_t Value = CE->getValue();
2129 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2130 } else {
2131 addExpr(Inst, getImm());
2132 }
2133 }
2134
2135 template<int Shift>
2136 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2137 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2137
, __extension__ __PRETTY_FUNCTION__))
;
2138
2139 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2140 uint64_t Value = CE->getValue();
2141 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2142 }
2143
2144 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2145 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2145
, __extension__ __PRETTY_FUNCTION__))
;
2146 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2147 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2148 }
2149
2150 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2151 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2151
, __extension__ __PRETTY_FUNCTION__))
;
2152 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2153 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2154 }
2155
2156 void print(raw_ostream &OS) const override;
2157
2158 static std::unique_ptr<AArch64Operand>
2159 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2160 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2161 Op->Tok.Data = Str.data();
2162 Op->Tok.Length = Str.size();
2163 Op->Tok.IsSuffix = IsSuffix;
2164 Op->StartLoc = S;
2165 Op->EndLoc = S;
2166 return Op;
2167 }
2168
2169 static std::unique_ptr<AArch64Operand>
2170 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2171 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2172 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2173 unsigned ShiftAmount = 0,
2174 unsigned HasExplicitAmount = false) {
2175 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2176 Op->Reg.RegNum = RegNum;
2177 Op->Reg.Kind = Kind;
2178 Op->Reg.ElementWidth = 0;
2179 Op->Reg.EqualityTy = EqTy;
2180 Op->Reg.ShiftExtend.Type = ExtTy;
2181 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2182 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2183 Op->StartLoc = S;
2184 Op->EndLoc = E;
2185 return Op;
2186 }
2187
2188 static std::unique_ptr<AArch64Operand>
2189 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2190 SMLoc S, SMLoc E, MCContext &Ctx,
2191 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2192 unsigned ShiftAmount = 0,
2193 unsigned HasExplicitAmount = false) {
2194 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
|| Kind == RegKind::SVEPredicateAsCounter) && "Invalid vector kind"
) ? void (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector || Kind == RegKind::SVEPredicateAsCounter) && \"Invalid vector kind\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2197
, __extension__ __PRETTY_FUNCTION__))
2195 Kind == RegKind::SVEPredicateVector ||(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
|| Kind == RegKind::SVEPredicateAsCounter) && "Invalid vector kind"
) ? void (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector || Kind == RegKind::SVEPredicateAsCounter) && \"Invalid vector kind\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2197
, __extension__ __PRETTY_FUNCTION__))
2196 Kind == RegKind::SVEPredicateAsCounter) &&(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
|| Kind == RegKind::SVEPredicateAsCounter) && "Invalid vector kind"
) ? void (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector || Kind == RegKind::SVEPredicateAsCounter) && \"Invalid vector kind\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2197
, __extension__ __PRETTY_FUNCTION__))
2197 "Invalid vector kind")(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
|| Kind == RegKind::SVEPredicateAsCounter) && "Invalid vector kind"
) ? void (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector || Kind == RegKind::SVEPredicateAsCounter) && \"Invalid vector kind\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2197
, __extension__ __PRETTY_FUNCTION__))
;
2198 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2199 HasExplicitAmount);
2200 Op->Reg.ElementWidth = ElementWidth;
2201 return Op;
2202 }
2203
2204 static std::unique_ptr<AArch64Operand>
2205 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2206 unsigned NumElements, unsigned ElementWidth,
2207 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2208 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2209 Op->VectorList.RegNum = RegNum;
2210 Op->VectorList.Count = Count;
2211 Op->VectorList.Stride = Stride;
2212 Op->VectorList.NumElements = NumElements;
2213 Op->VectorList.ElementWidth = ElementWidth;
2214 Op->VectorList.RegisterKind = RegisterKind;
2215 Op->StartLoc = S;
2216 Op->EndLoc = E;
2217 return Op;
2218 }
2219
2220 static std::unique_ptr<AArch64Operand>
2221 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2222 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2223 Op->VectorIndex.Val = Idx;
2224 Op->StartLoc = S;
2225 Op->EndLoc = E;
2226 return Op;
2227 }
2228
2229 static std::unique_ptr<AArch64Operand>
2230 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2231 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2232 Op->MatrixTileList.RegMask = RegMask;
2233 Op->StartLoc = S;
2234 Op->EndLoc = E;
2235 return Op;
2236 }
2237
2238 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2239 const unsigned ElementWidth) {
2240 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2241 RegMap = {
2242 {{0, AArch64::ZAB0},
2243 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2244 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2245 {{8, AArch64::ZAB0},
2246 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2247 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2248 {{16, AArch64::ZAH0},
2249 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2250 {{16, AArch64::ZAH1},
2251 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2252 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2253 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2254 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2255 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2256 };
2257
2258 if (ElementWidth == 64)
2259 OutRegs.insert(Reg);
2260 else {
2261 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2262 assert(!Regs.empty() && "Invalid tile or element width!")(static_cast <bool> (!Regs.empty() && "Invalid tile or element width!"
) ? void (0) : __assert_fail ("!Regs.empty() && \"Invalid tile or element width!\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2262
, __extension__ __PRETTY_FUNCTION__))
;
2263 for (auto OutReg : Regs)
2264 OutRegs.insert(OutReg);
2265 }
2266 }
2267
2268 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2269 SMLoc E, MCContext &Ctx) {
2270 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2271 Op->Imm.Val = Val;
2272 Op->StartLoc = S;
2273 Op->EndLoc = E;
2274 return Op;
2275 }
2276
2277 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2278 unsigned ShiftAmount,
2279 SMLoc S, SMLoc E,
2280 MCContext &Ctx) {
2281 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2282 Op->ShiftedImm .Val = Val;
2283 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2284 Op->StartLoc = S;
2285 Op->EndLoc = E;
2286 return Op;
2287 }
2288
2289 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2290 unsigned Last, SMLoc S,
2291 SMLoc E,
2292 MCContext &Ctx) {
2293 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2294 Op->ImmRange.First = First;
2295 Op->ImmRange.Last = Last;
2296 Op->EndLoc = E;
2297 return Op;
2298 }
2299
2300 static std::unique_ptr<AArch64Operand>
2301 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2302 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2303 Op->CondCode.Code = Code;
2304 Op->StartLoc = S;
2305 Op->EndLoc = E;
2306 return Op;
2307 }
2308
2309 static std::unique_ptr<AArch64Operand>
2310 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2311 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2312 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2313 Op->FPImm.IsExact = IsExact;
2314 Op->StartLoc = S;
2315 Op->EndLoc = S;
2316 return Op;
2317 }
2318
2319 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2320 StringRef Str,
2321 SMLoc S,
2322 MCContext &Ctx,
2323 bool HasnXSModifier) {
2324 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2325 Op->Barrier.Val = Val;
2326 Op->Barrier.Data = Str.data();
2327 Op->Barrier.Length = Str.size();
2328 Op->Barrier.HasnXSModifier = HasnXSModifier;
2329 Op->StartLoc = S;
2330 Op->EndLoc = S;
2331 return Op;
2332 }
2333
2334 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2335 uint32_t MRSReg,
2336 uint32_t MSRReg,
2337 uint32_t PStateField,
2338 MCContext &Ctx) {
2339 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2340 Op->SysReg.Data = Str.data();
2341 Op->SysReg.Length = Str.size();
2342 Op->SysReg.MRSReg = MRSReg;
2343 Op->SysReg.MSRReg = MSRReg;
2344 Op->SysReg.PStateField = PStateField;
2345 Op->StartLoc = S;
2346 Op->EndLoc = S;
2347 return Op;
2348 }
2349
2350 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2351 SMLoc E, MCContext &Ctx) {
2352 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2353 Op->SysCRImm.Val = Val;
2354 Op->StartLoc = S;
2355 Op->EndLoc = E;
2356 return Op;
2357 }
2358
2359 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2360 StringRef Str,
2361 SMLoc S,
2362 MCContext &Ctx) {
2363 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2364 Op->Prefetch.Val = Val;
2365 Op->Barrier.Data = Str.data();
2366 Op->Barrier.Length = Str.size();
2367 Op->StartLoc = S;
2368 Op->EndLoc = S;
2369 return Op;
2370 }
2371
2372 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2373 StringRef Str,
2374 SMLoc S,
2375 MCContext &Ctx) {
2376 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2377 Op->PSBHint.Val = Val;
2378 Op->PSBHint.Data = Str.data();
2379 Op->PSBHint.Length = Str.size();
2380 Op->StartLoc = S;
2381 Op->EndLoc = S;
2382 return Op;
2383 }
2384
2385 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2386 StringRef Str,
2387 SMLoc S,
2388 MCContext &Ctx) {
2389 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2390 Op->BTIHint.Val = Val | 32;
2391 Op->BTIHint.Data = Str.data();
2392 Op->BTIHint.Length = Str.size();
2393 Op->StartLoc = S;
2394 Op->EndLoc = S;
2395 return Op;
2396 }
2397
2398 static std::unique_ptr<AArch64Operand>
2399 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2400 SMLoc S, SMLoc E, MCContext &Ctx) {
2401 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2402 Op->MatrixReg.RegNum = RegNum;
2403 Op->MatrixReg.ElementWidth = ElementWidth;
2404 Op->MatrixReg.Kind = Kind;
2405 Op->StartLoc = S;
2406 Op->EndLoc = E;
2407 return Op;
2408 }
2409
2410 static std::unique_ptr<AArch64Operand>
2411 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2412 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2413 Op->SVCR.PStateField = PStateField;
2414 Op->SVCR.Data = Str.data();
2415 Op->SVCR.Length = Str.size();
2416 Op->StartLoc = S;
2417 Op->EndLoc = S;
2418 return Op;
2419 }
2420
2421 static std::unique_ptr<AArch64Operand>
2422 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2423 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2424 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2425 Op->ShiftExtend.Type = ShOp;
2426 Op->ShiftExtend.Amount = Val;
2427 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2428 Op->StartLoc = S;
2429 Op->EndLoc = E;
2430 return Op;
2431 }
2432};
2433
2434} // end anonymous namespace.
2435
2436void AArch64Operand::print(raw_ostream &OS) const {
2437 switch (Kind) {
2438 case k_FPImm:
2439 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2440 if (!getFPImmIsExact())
2441 OS << " (inexact)";
2442 OS << ">";
2443 break;
2444 case k_Barrier: {
2445 StringRef Name = getBarrierName();
2446 if (!Name.empty())
2447 OS << "<barrier " << Name << ">";
2448 else
2449 OS << "<barrier invalid #" << getBarrier() << ">";
2450 break;
2451 }
2452 case k_Immediate:
2453 OS << *getImm();
2454 break;
2455 case k_ShiftedImm: {
2456 unsigned Shift = getShiftedImmShift();
2457 OS << "<shiftedimm ";
2458 OS << *getShiftedImmVal();
2459 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2460 break;
2461 }
2462 case k_ImmRange: {
2463 OS << "<immrange ";
2464 OS << getFirstImmVal();
2465 OS << ":" << getLastImmVal() << ">";
2466 break;
2467 }
2468 case k_CondCode:
2469 OS << "<condcode " << getCondCode() << ">";
2470 break;
2471 case k_VectorList: {
2472 OS << "<vectorlist ";
2473 unsigned Reg = getVectorListStart();
2474 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2475 OS << Reg + i * getVectorListStride() << " ";
2476 OS << ">";
2477 break;
2478 }
2479 case k_VectorIndex:
2480 OS << "<vectorindex " << getVectorIndex() << ">";
2481 break;
2482 case k_SysReg:
2483 OS << "<sysreg: " << getSysReg() << '>';
2484 break;
2485 case k_Token:
2486 OS << "'" << getToken() << "'";
2487 break;
2488 case k_SysCR:
2489 OS << "c" << getSysCR();
2490 break;
2491 case k_Prefetch: {
2492 StringRef Name = getPrefetchName();
2493 if (!Name.empty())
2494 OS << "<prfop " << Name << ">";
2495 else
2496 OS << "<prfop invalid #" << getPrefetch() << ">";
2497 break;
2498 }
2499 case k_PSBHint:
2500 OS << getPSBHintName();
2501 break;
2502 case k_BTIHint:
2503 OS << getBTIHintName();
2504 break;
2505 case k_MatrixRegister:
2506 OS << "<matrix " << getMatrixReg() << ">";
2507 break;
2508 case k_MatrixTileList: {
2509 OS << "<matrixlist ";
2510 unsigned RegMask = getMatrixTileListRegMask();
2511 unsigned MaxBits = 8;
2512 for (unsigned I = MaxBits; I > 0; --I)
2513 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2514 OS << '>';
2515 break;
2516 }
2517 case k_SVCR: {
2518 OS << getSVCR();
2519 break;
2520 }
2521 case k_Register:
2522 OS << "<register " << getReg() << ">";
2523 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2524 break;
2525 [[fallthrough]];
2526 case k_ShiftExtend:
2527 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2528 << getShiftExtendAmount();
2529 if (!hasShiftExtendAmount())
2530 OS << "<imp>";
2531 OS << '>';
2532 break;
2533 }
2534}
2535
2536/// @name Auto-generated Match Functions
2537/// {
2538
2539static unsigned MatchRegisterName(StringRef Name);
2540
2541/// }
2542
2543static unsigned MatchNeonVectorRegName(StringRef Name) {
2544 return StringSwitch<unsigned>(Name.lower())
2545 .Case("v0", AArch64::Q0)
2546 .Case("v1", AArch64::Q1)
2547 .Case("v2", AArch64::Q2)
2548 .Case("v3", AArch64::Q3)
2549 .Case("v4", AArch64::Q4)
2550 .Case("v5", AArch64::Q5)
2551 .Case("v6", AArch64::Q6)
2552 .Case("v7", AArch64::Q7)
2553 .Case("v8", AArch64::Q8)
2554 .Case("v9", AArch64::Q9)
2555 .Case("v10", AArch64::Q10)
2556 .Case("v11", AArch64::Q11)
2557 .Case("v12", AArch64::Q12)
2558 .Case("v13", AArch64::Q13)
2559 .Case("v14", AArch64::Q14)
2560 .Case("v15", AArch64::Q15)
2561 .Case("v16", AArch64::Q16)
2562 .Case("v17", AArch64::Q17)
2563 .Case("v18", AArch64::Q18)
2564 .Case("v19", AArch64::Q19)
2565 .Case("v20", AArch64::Q20)
2566 .Case("v21", AArch64::Q21)
2567 .Case("v22", AArch64::Q22)
2568 .Case("v23", AArch64::Q23)
2569 .Case("v24", AArch64::Q24)
2570 .Case("v25", AArch64::Q25)
2571 .Case("v26", AArch64::Q26)
2572 .Case("v27", AArch64::Q27)
2573 .Case("v28", AArch64::Q28)
2574 .Case("v29", AArch64::Q29)
2575 .Case("v30", AArch64::Q30)
2576 .Case("v31", AArch64::Q31)
2577 .Default(0);
2578}
2579
2580/// Returns an optional pair of (#elements, element-width) if Suffix
2581/// is a valid vector kind. Where the number of elements in a vector
2582/// or the vector width is implicit or explicitly unknown (but still a
2583/// valid suffix kind), 0 is used.
2584static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2585 RegKind VectorKind) {
2586 std::pair<int, int> Res = {-1, -1};
2587
2588 switch (VectorKind) {
2589 case RegKind::NeonVector:
2590 Res =
2591 StringSwitch<std::pair<int, int>>(Suffix.lower())
2592 .Case("", {0, 0})
2593 .Case(".1d", {1, 64})
2594 .Case(".1q", {1, 128})
2595 // '.2h' needed for fp16 scalar pairwise reductions
2596 .Case(".2h", {2, 16})
2597 .Case(".2s", {2, 32})
2598 .Case(".2d", {2, 64})
2599 // '.4b' is another special case for the ARMv8.2a dot product
2600 // operand
2601 .Case(".4b", {4, 8})
2602 .Case(".4h", {4, 16})
2603 .Case(".4s", {4, 32})
2604 .Case(".8b", {8, 8})
2605 .Case(".8h", {8, 16})
2606 .Case(".16b", {16, 8})
2607 // Accept the width neutral ones, too, for verbose syntax. If those
2608 // aren't used in the right places, the token operand won't match so
2609 // all will work out.
2610 .Case(".b", {0, 8})
2611 .Case(".h", {0, 16})
2612 .Case(".s", {0, 32})
2613 .Case(".d", {0, 64})
2614 .Default({-1, -1});
2615 break;
2616 case RegKind::SVEPredicateAsCounter:
2617 case RegKind::SVEPredicateVector:
2618 case RegKind::SVEDataVector:
2619 case RegKind::Matrix:
2620 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2621 .Case("", {0, 0})
2622 .Case(".b", {0, 8})
2623 .Case(".h", {0, 16})
2624 .Case(".s", {0, 32})
2625 .Case(".d", {0, 64})
2626 .Case(".q", {0, 128})
2627 .Default({-1, -1});
2628 break;
2629 default:
2630 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2630)
;
2631 }
2632
2633 if (Res == std::make_pair(-1, -1))
2634 return std::nullopt;
2635
2636 return Optional<std::pair<int, int>>(Res);
2637}
2638
2639static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2640 return parseVectorKind(Suffix, VectorKind).has_value();
2641}
2642
2643static unsigned matchSVEDataVectorRegName(StringRef Name) {
2644 return StringSwitch<unsigned>(Name.lower())
2645 .Case("z0", AArch64::Z0)
2646 .Case("z1", AArch64::Z1)
2647 .Case("z2", AArch64::Z2)
2648 .Case("z3", AArch64::Z3)
2649 .Case("z4", AArch64::Z4)
2650 .Case("z5", AArch64::Z5)
2651 .Case("z6", AArch64::Z6)
2652 .Case("z7", AArch64::Z7)
2653 .Case("z8", AArch64::Z8)
2654 .Case("z9", AArch64::Z9)
2655 .Case("z10", AArch64::Z10)
2656 .Case("z11", AArch64::Z11)
2657 .Case("z12", AArch64::Z12)
2658 .Case("z13", AArch64::Z13)
2659 .Case("z14", AArch64::Z14)
2660 .Case("z15", AArch64::Z15)
2661 .Case("z16", AArch64::Z16)
2662 .Case("z17", AArch64::Z17)
2663 .Case("z18", AArch64::Z18)
2664 .Case("z19", AArch64::Z19)
2665 .Case("z20", AArch64::Z20)
2666 .Case("z21", AArch64::Z21)
2667 .Case("z22", AArch64::Z22)
2668 .Case("z23", AArch64::Z23)
2669 .Case("z24", AArch64::Z24)
2670 .Case("z25", AArch64::Z25)
2671 .Case("z26", AArch64::Z26)
2672 .Case("z27", AArch64::Z27)
2673 .Case("z28", AArch64::Z28)
2674 .Case("z29", AArch64::Z29)
2675 .Case("z30", AArch64::Z30)
2676 .Case("z31", AArch64::Z31)
2677 .Default(0);
2678}
2679
2680static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2681 return StringSwitch<unsigned>(Name.lower())
2682 .Case("p0", AArch64::P0)
2683 .Case("p1", AArch64::P1)
2684 .Case("p2", AArch64::P2)
2685 .Case("p3", AArch64::P3)
2686 .Case("p4", AArch64::P4)
2687 .Case("p5", AArch64::P5)
2688 .Case("p6", AArch64::P6)
2689 .Case("p7", AArch64::P7)
2690 .Case("p8", AArch64::P8)
2691 .Case("p9", AArch64::P9)
2692 .Case("p10", AArch64::P10)
2693 .Case("p11", AArch64::P11)
2694 .Case("p12", AArch64::P12)
2695 .Case("p13", AArch64::P13)
2696 .Case("p14", AArch64::P14)
2697 .Case("p15", AArch64::P15)
2698 .Default(0);
2699}
2700
2701static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2702 return StringSwitch<unsigned>(Name.lower())
2703 .Case("pn0", AArch64::P0)
2704 .Case("pn1", AArch64::P1)
2705 .Case("pn2", AArch64::P2)
2706 .Case("pn3", AArch64::P3)
2707 .Case("pn4", AArch64::P4)
2708 .Case("pn5", AArch64::P5)
2709 .Case("pn6", AArch64::P6)
2710 .Case("pn7", AArch64::P7)
2711 .Case("pn8", AArch64::P8)
2712 .Case("pn9", AArch64::P9)
2713 .Case("pn10", AArch64::P10)
2714 .Case("pn11", AArch64::P11)
2715 .Case("pn12", AArch64::P12)
2716 .Case("pn13", AArch64::P13)
2717 .Case("pn14", AArch64::P14)
2718 .Case("pn15", AArch64::P15)
2719 .Default(0);
2720}
2721
2722static unsigned matchMatrixTileListRegName(StringRef Name) {
2723 return StringSwitch<unsigned>(Name.lower())
2724 .Case("za0.d", AArch64::ZAD0)
2725 .Case("za1.d", AArch64::ZAD1)
2726 .Case("za2.d", AArch64::ZAD2)
2727 .Case("za3.d", AArch64::ZAD3)
2728 .Case("za4.d", AArch64::ZAD4)
2729 .Case("za5.d", AArch64::ZAD5)
2730 .Case("za6.d", AArch64::ZAD6)
2731 .Case("za7.d", AArch64::ZAD7)
2732 .Case("za0.s", AArch64::ZAS0)
2733 .Case("za1.s", AArch64::ZAS1)
2734 .Case("za2.s", AArch64::ZAS2)
2735 .Case("za3.s", AArch64::ZAS3)
2736 .Case("za0.h", AArch64::ZAH0)
2737 .Case("za1.h", AArch64::ZAH1)
2738 .Case("za0.b", AArch64::ZAB0)
2739 .Default(0);
2740}
2741
2742static unsigned matchMatrixRegName(StringRef Name) {
2743 return StringSwitch<unsigned>(Name.lower())
2744 .Case("za", AArch64::ZA)
2745 .Case("za0.q", AArch64::ZAQ0)
2746 .Case("za1.q", AArch64::ZAQ1)
2747 .Case("za2.q", AArch64::ZAQ2)
2748 .Case("za3.q", AArch64::ZAQ3)
2749 .Case("za4.q", AArch64::ZAQ4)
2750 .Case("za5.q", AArch64::ZAQ5)
2751 .Case("za6.q", AArch64::ZAQ6)
2752 .Case("za7.q", AArch64::ZAQ7)
2753 .Case("za8.q", AArch64::ZAQ8)
2754 .Case("za9.q", AArch64::ZAQ9)
2755 .Case("za10.q", AArch64::ZAQ10)
2756 .Case("za11.q", AArch64::ZAQ11)
2757 .Case("za12.q", AArch64::ZAQ12)
2758 .Case("za13.q", AArch64::ZAQ13)
2759 .Case("za14.q", AArch64::ZAQ14)
2760 .Case("za15.q", AArch64::ZAQ15)
2761 .Case("za0.d", AArch64::ZAD0)
2762 .Case("za1.d", AArch64::ZAD1)
2763 .Case("za2.d", AArch64::ZAD2)
2764 .Case("za3.d", AArch64::ZAD3)
2765 .Case("za4.d", AArch64::ZAD4)
2766 .Case("za5.d", AArch64::ZAD5)
2767 .Case("za6.d", AArch64::ZAD6)
2768 .Case("za7.d", AArch64::ZAD7)
2769 .Case("za0.s", AArch64::ZAS0)
2770 .Case("za1.s", AArch64::ZAS1)
2771 .Case("za2.s", AArch64::ZAS2)
2772 .Case("za3.s", AArch64::ZAS3)
2773 .Case("za0.h", AArch64::ZAH0)
2774 .Case("za1.h", AArch64::ZAH1)
2775 .Case("za0.b", AArch64::ZAB0)
2776 .Case("za0h.q", AArch64::ZAQ0)
2777 .Case("za1h.q", AArch64::ZAQ1)
2778 .Case("za2h.q", AArch64::ZAQ2)
2779 .Case("za3h.q", AArch64::ZAQ3)
2780 .Case("za4h.q", AArch64::ZAQ4)
2781 .Case("za5h.q", AArch64::ZAQ5)
2782 .Case("za6h.q", AArch64::ZAQ6)
2783 .Case("za7h.q", AArch64::ZAQ7)
2784 .Case("za8h.q", AArch64::ZAQ8)
2785 .Case("za9h.q", AArch64::ZAQ9)
2786 .Case("za10h.q", AArch64::ZAQ10)
2787 .Case("za11h.q", AArch64::ZAQ11)
2788 .Case("za12h.q", AArch64::ZAQ12)
2789 .Case("za13h.q", AArch64::ZAQ13)
2790 .Case("za14h.q", AArch64::ZAQ14)
2791 .Case("za15h.q", AArch64::ZAQ15)
2792 .Case("za0h.d", AArch64::ZAD0)
2793 .Case("za1h.d", AArch64::ZAD1)
2794 .Case("za2h.d", AArch64::ZAD2)
2795 .Case("za3h.d", AArch64::ZAD3)
2796 .Case("za4h.d", AArch64::ZAD4)
2797 .Case("za5h.d", AArch64::ZAD5)
2798 .Case("za6h.d", AArch64::ZAD6)
2799 .Case("za7h.d", AArch64::ZAD7)
2800 .Case("za0h.s", AArch64::ZAS0)
2801 .Case("za1h.s", AArch64::ZAS1)
2802 .Case("za2h.s", AArch64::ZAS2)
2803 .Case("za3h.s", AArch64::ZAS3)
2804 .Case("za0h.h", AArch64::ZAH0)
2805 .Case("za1h.h", AArch64::ZAH1)
2806 .Case("za0h.b", AArch64::ZAB0)
2807 .Case("za0v.q", AArch64::ZAQ0)
2808 .Case("za1v.q", AArch64::ZAQ1)
2809 .Case("za2v.q", AArch64::ZAQ2)
2810 .Case("za3v.q", AArch64::ZAQ3)
2811 .Case("za4v.q", AArch64::ZAQ4)
2812 .Case("za5v.q", AArch64::ZAQ5)
2813 .Case("za6v.q", AArch64::ZAQ6)
2814 .Case("za7v.q", AArch64::ZAQ7)
2815 .Case("za8v.q", AArch64::ZAQ8)
2816 .Case("za9v.q", AArch64::ZAQ9)
2817 .Case("za10v.q", AArch64::ZAQ10)
2818 .Case("za11v.q", AArch64::ZAQ11)
2819 .Case("za12v.q", AArch64::ZAQ12)
2820 .Case("za13v.q", AArch64::ZAQ13)
2821 .Case("za14v.q", AArch64::ZAQ14)
2822 .Case("za15v.q", AArch64::ZAQ15)
2823 .Case("za0v.d", AArch64::ZAD0)
2824 .Case("za1v.d", AArch64::ZAD1)
2825 .Case("za2v.d", AArch64::ZAD2)
2826 .Case("za3v.d", AArch64::ZAD3)
2827 .Case("za4v.d", AArch64::ZAD4)
2828 .Case("za5v.d", AArch64::ZAD5)
2829 .Case("za6v.d", AArch64::ZAD6)
2830 .Case("za7v.d", AArch64::ZAD7)
2831 .Case("za0v.s", AArch64::ZAS0)
2832 .Case("za1v.s", AArch64::ZAS1)
2833 .Case("za2v.s", AArch64::ZAS2)
2834 .Case("za3v.s", AArch64::ZAS3)
2835 .Case("za0v.h", AArch64::ZAH0)
2836 .Case("za1v.h", AArch64::ZAH1)
2837 .Case("za0v.b", AArch64::ZAB0)
2838 .Default(0);
2839}
2840
2841bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2842 SMLoc &EndLoc) {
2843 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2844}
2845
2846OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2847 SMLoc &StartLoc,
2848 SMLoc &EndLoc) {
2849 StartLoc = getLoc();
2850 auto Res = tryParseScalarRegister(RegNo);
2851 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2852 return Res;
2853}
2854
2855// Matches a register name or register alias previously defined by '.req'
2856unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2857 RegKind Kind) {
2858 unsigned RegNum = 0;
2859 if ((RegNum = matchSVEDataVectorRegName(Name)))
2860 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2861
2862 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2863 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2864
2865 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2866 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2867
2868 if ((RegNum = MatchNeonVectorRegName(Name)))
2869 return Kind == RegKind::NeonVector ? RegNum : 0;
2870
2871 if ((RegNum = matchMatrixRegName(Name)))
2872 return Kind == RegKind::Matrix ? RegNum : 0;
2873
2874 if (Name.equals_insensitive("zt0"))
2875 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2876
2877 // The parsed register must be of RegKind Scalar
2878 if ((RegNum = MatchRegisterName(Name)))
2879 return (Kind == RegKind::Scalar) ? RegNum : 0;
2880
2881 if (!RegNum) {
2882 // Handle a few common aliases of registers.
2883 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2884 .Case("fp", AArch64::FP)
2885 .Case("lr", AArch64::LR)
2886 .Case("x31", AArch64::XZR)
2887 .Case("w31", AArch64::WZR)
2888 .Default(0))
2889 return Kind == RegKind::Scalar ? RegNum : 0;
2890
2891 // Check for aliases registered via .req. Canonicalize to lower case.
2892 // That's more consistent since register names are case insensitive, and
2893 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2894 auto Entry = RegisterReqs.find(Name.lower());
2895 if (Entry == RegisterReqs.end())
2896 return 0;
2897
2898 // set RegNum if the match is the right kind of register
2899 if (Kind == Entry->getValue().first)
2900 RegNum = Entry->getValue().second;
2901 }
2902 return RegNum;
2903}
2904
2905unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2906 switch (K) {
2907 case RegKind::Scalar:
2908 case RegKind::NeonVector:
2909 case RegKind::SVEDataVector:
2910 return 32;
2911 case RegKind::Matrix:
2912 case RegKind::SVEPredicateVector:
2913 case RegKind::SVEPredicateAsCounter:
2914 return 16;
2915 case RegKind::LookupTable:
2916 return 1;
2917 }
2918 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2918)
;
2919}
2920
2921/// tryParseScalarRegister - Try to parse a register name. The token must be an
2922/// Identifier when called, and if it is a register name the token is eaten and
2923/// the register is added to the operand list.
2924OperandMatchResultTy
2925AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2926 const AsmToken &Tok = getTok();
2927 if (Tok.isNot(AsmToken::Identifier))
2928 return MatchOperand_NoMatch;
2929
2930 std::string lowerCase = Tok.getString().lower();
2931 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2932 if (Reg == 0)
2933 return MatchOperand_NoMatch;
2934
2935 RegNum = Reg;
2936 Lex(); // Eat identifier token.
2937 return MatchOperand_Success;
2938}
2939
2940/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2941OperandMatchResultTy
2942AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2943 SMLoc S = getLoc();
2944
2945 if (getTok().isNot(AsmToken::Identifier)) {
2946 Error(S, "Expected cN operand where 0 <= N <= 15");
2947 return MatchOperand_ParseFail;
2948 }
2949
2950 StringRef Tok = getTok().getIdentifier();
2951 if (Tok[0] != 'c' && Tok[0] != 'C') {
2952 Error(S, "Expected cN operand where 0 <= N <= 15");
2953 return MatchOperand_ParseFail;
2954 }
2955
2956 uint32_t CRNum;
2957 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2958 if (BadNum || CRNum > 15) {
2959 Error(S, "Expected cN operand where 0 <= N <= 15");
2960 return MatchOperand_ParseFail;
2961 }
2962
2963 Lex(); // Eat identifier token.
2964 Operands.push_back(
2965 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2966 return MatchOperand_Success;
2967}
2968
2969// Either an identifier for named values or a 6-bit immediate.
2970OperandMatchResultTy
2971AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
2972 SMLoc S = getLoc();
2973 const AsmToken &Tok = getTok();
2974
2975 unsigned MaxVal = 63;
2976
2977 // Immediate case, with optional leading hash:
2978 if (parseOptionalToken(AsmToken::Hash) ||
2979 Tok.is(AsmToken::Integer)) {
2980 const MCExpr *ImmVal;
2981 if (getParser().parseExpression(ImmVal))
2982 return MatchOperand_ParseFail;
2983
2984 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2985 if (!MCE) {
2986 TokError("immediate value expected for prefetch operand");
2987 return MatchOperand_ParseFail;
2988 }
2989 unsigned prfop = MCE->getValue();
2990 if (prfop > MaxVal) {
2991 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2992 "] expected");
2993 return MatchOperand_ParseFail;
2994 }
2995
2996 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
2997 Operands.push_back(AArch64Operand::CreatePrefetch(
2998 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
2999 return MatchOperand_Success;
3000 }
3001
3002 if (Tok.isNot(AsmToken::Identifier)) {
3003 TokError("prefetch hint expected");
3004 return MatchOperand_ParseFail;
3005 }
3006
3007 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3008 if (!RPRFM) {
3009 TokError("prefetch hint expected");
3010 return MatchOperand_ParseFail;
3011 }
3012
3013 Operands.push_back(AArch64Operand::CreatePrefetch(
3014 RPRFM->Encoding, Tok.getString(), S, getContext()));
3015 Lex(); // Eat identifier token.
3016 return MatchOperand_Success;
3017}
3018
3019/// tryParsePrefetch - Try to parse a prefetch operand.
3020template <bool IsSVEPrefetch>
3021OperandMatchResultTy
3022AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3023 SMLoc S = getLoc();
3024 const AsmToken &Tok = getTok();
3025
3026 auto LookupByName = [](StringRef N) {
3027 if (IsSVEPrefetch) {
3028 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3029 return Optional<unsigned>(Res->Encoding);
3030 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3031 return Optional<unsigned>(Res->Encoding);
3032 return Optional<unsigned>();
3033 };
3034
3035 auto LookupByEncoding = [](unsigned E) {
3036 if (IsSVEPrefetch) {
3037 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3038 return Optional<StringRef>(Res->Name);
3039 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3040 return Optional<StringRef>(Res->Name);
3041 return Optional<StringRef>();
3042 };
3043 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3044
3045 // Either an identifier for named values or a 5-bit immediate.
3046 // Eat optional hash.
3047 if (parseOptionalToken(AsmToken::Hash) ||
3048 Tok.is(AsmToken::Integer)) {
3049 const MCExpr *ImmVal;
3050 if (getParser().parseExpression(ImmVal))
3051 return MatchOperand_ParseFail;
3052
3053 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3054 if (!MCE) {
3055 TokError("immediate value expected for prefetch operand");
3056 return MatchOperand_ParseFail;
3057 }
3058 unsigned prfop = MCE->getValue();
3059 if (prfop > MaxVal) {
3060 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3061 "] expected");
3062 return MatchOperand_ParseFail;
3063 }
3064
3065 auto PRFM = LookupByEncoding(MCE->getValue());
3066 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3067 S, getContext()));
3068 return MatchOperand_Success;
3069 }
3070
3071 if (Tok.isNot(AsmToken::Identifier)) {
3072 TokError("prefetch hint expected");
3073 return MatchOperand_ParseFail;
3074 }
3075
3076 auto PRFM = LookupByName(Tok.getString());
3077 if (!PRFM) {
3078 TokError("prefetch hint expected");
3079 return MatchOperand_ParseFail;
3080 }
3081
3082 Operands.push_back(AArch64Operand::CreatePrefetch(
3083 *PRFM, Tok.getString(), S, getContext()));
3084 Lex(); // Eat identifier token.
3085 return MatchOperand_Success;
3086}
3087
3088/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3089OperandMatchResultTy
3090AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3091 SMLoc S = getLoc();
3092 const AsmToken &Tok = getTok();
3093 if (Tok.isNot(AsmToken::Identifier)) {
3094 TokError("invalid operand for instruction");
3095 return MatchOperand_ParseFail;
3096 }
3097
3098 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3099 if (!PSB) {
3100 TokError("invalid operand for instruction");
3101 return MatchOperand_ParseFail;
3102 }
3103
3104 Operands.push_back(AArch64Operand::CreatePSBHint(
3105 PSB->Encoding, Tok.getString(), S, getContext()));
3106 Lex(); // Eat identifier token.
3107 return MatchOperand_Success;
3108}
3109
3110OperandMatchResultTy
3111AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3112 SMLoc StartLoc = getLoc();
3113
3114 unsigned RegNum;
3115
3116 // The case where xzr, xzr is not present is handled by an InstAlias.
3117
3118 auto RegTok = getTok(); // in case we need to backtrack
3119 if (tryParseScalarRegister(RegNum) != MatchOperand_Success)
3120 return MatchOperand_NoMatch;
3121
3122 if (RegNum != AArch64::XZR) {
3123 getLexer().UnLex(RegTok);
3124 return MatchOperand_NoMatch;
3125 }
3126
3127 if (parseComma())
3128 return MatchOperand_ParseFail;
3129
3130 if (tryParseScalarRegister(RegNum) != MatchOperand_Success) {
3131 TokError("expected register operand");
3132 return MatchOperand_ParseFail;
3133 }
3134
3135 if (RegNum != AArch64::XZR) {
3136 TokError("xzr must be followed by xzr");
3137 return MatchOperand_ParseFail;
3138 }
3139
3140 // We need to push something, since we claim this is an operand in .td.
3141 // See also AArch64AsmParser::parseKeywordOperand.
3142 Operands.push_back(AArch64Operand::CreateReg(
3143 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3144
3145 return MatchOperand_Success;
3146}
3147
3148/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3149OperandMatchResultTy
3150AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3151 SMLoc S = getLoc();
3152 const AsmToken &Tok = getTok();
3153 if (Tok.isNot(AsmToken::Identifier)) {
3154 TokError("invalid operand for instruction");
3155 return MatchOperand_ParseFail;
3156 }
3157
3158 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3159 if (!BTI) {
3160 TokError("invalid operand for instruction");
3161 return MatchOperand_ParseFail;
3162 }
3163
3164 Operands.push_back(AArch64Operand::CreateBTIHint(
3165 BTI->Encoding, Tok.getString(), S, getContext()));
3166 Lex(); // Eat identifier token.
3167 return MatchOperand_Success;
3168}
3169
3170/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3171/// instruction.
3172OperandMatchResultTy
3173AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3174 SMLoc S = getLoc();
3175 const MCExpr *Expr = nullptr;
3176
3177 if (getTok().is(AsmToken::Hash)) {
3178 Lex(); // Eat hash token.
3179 }
3180
3181 if (parseSymbolicImmVal(Expr))
3182 return MatchOperand_ParseFail;
3183
3184 AArch64MCExpr::VariantKind ELFRefKind;
3185 MCSymbolRefExpr::VariantKind DarwinRefKind;
3186 int64_t Addend;
3187 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3188 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3189 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3190 // No modifier was specified at all; this is the syntax for an ELF basic
3191 // ADRP relocation (unfortunately).
3192 Expr =
3193 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3194 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3195 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3196 Addend != 0) {
3197 Error(S, "gotpage label reference not allowed an addend");
3198 return MatchOperand_ParseFail;
3199 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3200 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3201 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3202 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3203 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3204 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3205 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3206 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3207 // The operand must be an @page or @gotpage qualified symbolref.
3208 Error(S, "page or gotpage label reference expected");
3209 return MatchOperand_ParseFail;
3210 }
3211 }
3212
3213 // We have either a label reference possibly with addend or an immediate. The
3214 // addend is a raw value here. The linker will adjust it to only reference the
3215 // page.
3216 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3217 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3218
3219 return MatchOperand_Success;
3220}
3221
3222/// tryParseAdrLabel - Parse and validate a source label for the ADR
3223/// instruction.
3224OperandMatchResultTy
3225AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3226 SMLoc S = getLoc();
3227 const MCExpr *Expr = nullptr;
3228
3229 // Leave anything with a bracket to the default for SVE
3230 if (getTok().is(AsmToken::LBrac))
3231 return MatchOperand_NoMatch;
3232
3233 if (getTok().is(AsmToken::Hash))
3234 Lex(); // Eat hash token.
3235
3236 if (parseSymbolicImmVal(Expr))
3237 return MatchOperand_ParseFail;
3238
3239 AArch64MCExpr::VariantKind ELFRefKind;
3240 MCSymbolRefExpr::VariantKind DarwinRefKind;
3241 int64_t Addend;
3242 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3243 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3244 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3245 // No modifier was specified at all; this is the syntax for an ELF basic
3246 // ADR relocation (unfortunately).
3247 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3248 } else {
3249 Error(S, "unexpected adr label");
3250 return MatchOperand_ParseFail;
3251 }
3252 }
3253
3254 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3255 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3256 return MatchOperand_Success;
3257}
3258
3259/// tryParseFPImm - A floating point immediate expression operand.
3260template<bool AddFPZeroAsLiteral>
3261OperandMatchResultTy
3262AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3263 SMLoc S = getLoc();
3264
3265 bool Hash = parseOptionalToken(AsmToken::Hash);
3266
3267 // Handle negation, as that still comes through as a separate token.
3268 bool isNegative = parseOptionalToken(AsmToken::Minus);
3269
3270 const AsmToken &Tok = getTok();
3271 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3272 if (!Hash)
3273 return MatchOperand_NoMatch;
3274 TokError("invalid floating point immediate");
3275 return MatchOperand_ParseFail;
3276 }
3277
3278 // Parse hexadecimal representation.
3279 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
3280 if (Tok.getIntVal() > 255 || isNegative) {
3281 TokError("encoded floating point value out of range");
3282 return MatchOperand_ParseFail;
3283 }
3284
3285 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3286 Operands.push_back(
3287 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3288 } else {
3289 // Parse FP representation.
3290 APFloat RealVal(APFloat::IEEEdouble());
3291 auto StatusOrErr =
3292 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3293 if (errorToBool(StatusOrErr.takeError())) {
3294 TokError("invalid floating point representation");
3295 return MatchOperand_ParseFail;
3296 }
3297
3298 if (isNegative)
3299 RealVal.changeSign();
3300
3301 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3302 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3303 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3304 } else
3305 Operands.push_back(AArch64Operand::CreateFPImm(
3306 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3307 }
3308
3309 Lex(); // Eat the token.
3310
3311 return MatchOperand_Success;
3312}
3313
3314/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3315/// a shift suffix, for example '#1, lsl #12'.
3316OperandMatchResultTy
3317AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3318 SMLoc S = getLoc();
3319
3320 if (getTok().is(AsmToken::Hash))
1
Taking true branch
3321 Lex(); // Eat '#'
3322 else if (getTok().isNot(AsmToken::Integer))
3323 // Operand should start from # or should be integer, emit error otherwise.
3324 return MatchOperand_NoMatch;
3325
3326 if (getTok().is(AsmToken::Integer) &&
2
Taking true branch
3327 getLexer().peekTok().is(AsmToken::Colon))
3328 return tryParseImmRange(Operands);
3
Calling 'AArch64AsmParser::tryParseImmRange'
3329
3330 const MCExpr *Imm = nullptr;
3331 if (parseSymbolicImmVal(Imm))
3332 return MatchOperand_ParseFail;
3333 else if (getTok().isNot(AsmToken::Comma)) {
3334 Operands.push_back(
3335 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3336 return MatchOperand_Success;
3337 }
3338
3339 // Eat ','
3340 Lex();
3341 StringRef VecGroup;
3342 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3343 Operands.push_back(
3344 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3345 Operands.push_back(
3346 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3347 return MatchOperand_Success;
3348 }
3349
3350 // The optional operand must be "lsl #N" where N is non-negative.
3351 if (!getTok().is(AsmToken::Identifier) ||
3352 !getTok().getIdentifier().equals_insensitive("lsl")) {
3353 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3354 return MatchOperand_ParseFail;
3355 }
3356
3357 // Eat 'lsl'
3358 Lex();
3359
3360 parseOptionalToken(AsmToken::Hash);
3361
3362 if (getTok().isNot(AsmToken::Integer)) {
3363 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3364 return MatchOperand_ParseFail;
3365 }
3366
3367 int64_t ShiftAmount = getTok().getIntVal();
3368
3369 if (ShiftAmount < 0) {
3370 Error(getLoc(), "positive shift amount required");
3371 return MatchOperand_ParseFail;
3372 }
3373 Lex(); // Eat the number
3374
3375 // Just in case the optional lsl #0 is used for immediates other than zero.
3376 if (ShiftAmount == 0 && Imm != nullptr) {
3377 Operands.push_back(
3378 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3379 return MatchOperand_Success;
3380 }
3381
3382 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3383 getLoc(), getContext()));
3384 return MatchOperand_Success;
3385}
3386
3387/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3388/// suggestion to help common typos.
3389AArch64CC::CondCode
3390AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3391 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3392 .Case("eq", AArch64CC::EQ)
3393 .Case("ne", AArch64CC::NE)
3394 .Case("cs", AArch64CC::HS)
3395 .Case("hs", AArch64CC::HS)
3396 .Case("cc", AArch64CC::LO)
3397 .Case("lo", AArch64CC::LO)
3398 .Case("mi", AArch64CC::MI)
3399 .Case("pl", AArch64CC::PL)
3400 .Case("vs", AArch64CC::VS)
3401 .Case("vc", AArch64CC::VC)
3402 .Case("hi", AArch64CC::HI)
3403 .Case("ls", AArch64CC::LS)
3404 .Case("ge", AArch64CC::GE)
3405 .Case("lt", AArch64CC::LT)
3406 .Case("gt", AArch64CC::GT)
3407 .Case("le", AArch64CC::LE)
3408 .Case("al", AArch64CC::AL)
3409 .Case("nv", AArch64CC::NV)
3410 .Default(AArch64CC::Invalid);
3411
3412 if (CC == AArch64CC::Invalid &&
3413 getSTI().getFeatureBits()[AArch64::FeatureSVE]) {
3414 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3415 .Case("none", AArch64CC::EQ)
3416 .Case("any", AArch64CC::NE)
3417 .Case("nlast", AArch64CC::HS)
3418 .Case("last", AArch64CC::LO)
3419 .Case("first", AArch64CC::MI)
3420 .Case("nfrst", AArch64CC::PL)
3421 .Case("pmore", AArch64CC::HI)
3422 .Case("plast", AArch64CC::LS)
3423 .Case("tcont", AArch64CC::GE)
3424 .Case("tstop", AArch64CC::LT)
3425 .Default(AArch64CC::Invalid);
3426
3427 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3428 Suggestion = "nfrst";
3429 }
3430 return CC;
3431}
3432
3433/// parseCondCode - Parse a Condition Code operand.
3434bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3435 bool invertCondCode) {
3436 SMLoc S = getLoc();
3437 const AsmToken &Tok = getTok();
3438 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")(static_cast <bool> (Tok.is(AsmToken::Identifier) &&
"Token is not an Identifier") ? void (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3438
, __extension__ __PRETTY_FUNCTION__))
;
3439
3440 StringRef Cond = Tok.getString();
3441 std::string Suggestion;
3442 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3443 if (CC == AArch64CC::Invalid) {
3444 std::string Msg = "invalid condition code";
3445 if (!Suggestion.empty())
3446 Msg += ", did you mean " + Suggestion + "?";
3447 return TokError(Msg);
3448 }
3449 Lex(); // Eat identifier token.
3450
3451 if (invertCondCode) {
3452 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3453 return TokError("condition codes AL and NV are invalid for this instruction");
3454 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3455 }
3456
3457 Operands.push_back(
3458 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3459 return false;
3460}
3461
3462OperandMatchResultTy
3463AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3464 const AsmToken &Tok = getTok();
3465 SMLoc S = getLoc();
3466
3467 if (Tok.isNot(AsmToken::Identifier)) {
3468 TokError("invalid operand for instruction");
3469 return MatchOperand_ParseFail;
3470 }
3471
3472 unsigned PStateImm = -1;
3473 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3474 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3475 PStateImm = SVCR->Encoding;
3476
3477 Operands.push_back(
3478 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3479 Lex(); // Eat identifier token.
3480 return MatchOperand_Success;
3481}
3482
3483OperandMatchResultTy
3484AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3485 const AsmToken &Tok = getTok();
3486 SMLoc S = getLoc();
3487
3488 StringRef Name = Tok.getString();
3489
3490 if (Name.equals_insensitive("za") || Name.startswith_insensitive("za.")) {
3491 Lex(); // eat "za[.(b|h|s|d)]"
3492 unsigned ElementWidth = 0;
3493 auto DotPosition = Name.find('.');
3494 if (DotPosition != StringRef::npos) {
3495 const auto &KindRes =
3496 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3497 if (!KindRes) {
3498 TokError(
3499 "Expected the register to be followed by element width suffix");
3500 return MatchOperand_ParseFail;
3501 }
3502 ElementWidth = KindRes->second;
3503 }
3504 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3505 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3506 getContext()));
3507 if (getLexer().is(AsmToken::LBrac)) {
3508 // There's no comma after matrix operand, so we can parse the next operand
3509 // immediately.
3510 if (parseOperand(Operands, false, false))
3511 return MatchOperand_NoMatch;
3512 }
3513 return MatchOperand_Success;
3514 }
3515
3516 // Try to parse matrix register.
3517 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3518 if (!Reg)
3519 return MatchOperand_NoMatch;
3520
3521 size_t DotPosition = Name.find('.');
3522 assert(DotPosition != StringRef::npos && "Unexpected register")(static_cast <bool> (DotPosition != StringRef::npos &&
"Unexpected register") ? void (0) : __assert_fail ("DotPosition != StringRef::npos && \"Unexpected register\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3522
, __extension__ __PRETTY_FUNCTION__))
;
3523
3524 StringRef Head = Name.take_front(DotPosition);
3525 StringRef Tail = Name.drop_front(DotPosition);
3526 StringRef RowOrColumn = Head.take_back();
3527
3528 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3529 .Case("h", MatrixKind::Row)
3530 .Case("v", MatrixKind::Col)
3531 .Default(MatrixKind::Tile);
3532
3533 // Next up, parsing the suffix
3534 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3535 if (!KindRes) {
3536 TokError("Expected the register to be followed by element width suffix");
3537 return MatchOperand_ParseFail;
3538 }
3539 unsigned ElementWidth = KindRes->second;
3540
3541 Lex();
3542
3543 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3544 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3545
3546 if (getLexer().is(AsmToken::LBrac)) {
3547 // There's no comma after matrix operand, so we can parse the next operand
3548 // immediately.
3549 if (parseOperand(Operands, false, false))
3550 return MatchOperand_NoMatch;
3551 }
3552 return MatchOperand_Success;
3553}
3554
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Recognizes a shift ("lsl", "lsr", "asr", "ror", "msl") or extend
/// ("uxtb".."sxtx") specifier optionally followed by "#imm". For extend types
/// the immediate may be omitted, in which case an implicit #0 is used.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  // Match the specifier case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Lex(); // Eat the shift/extend specifier.

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The shift amount must fold to a compile-time constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
3626
// Maps architecture-extension names (as accepted by directives such as
// .arch_extension) to the subtarget features they enable. An empty feature
// set marks an extension that is recognized but not yet supported.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},
    {"b16b16", {AArch64::FeatureB16B16}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},
    {"sme2p1", {AArch64::FeatureSME2p1}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},
    {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},
    {"lse128", {AArch64::FeatureLSE128}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
3681
3682static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3683 if (FBS[AArch64::HasV8_0aOps])
3684 Str += "ARMv8a";
3685 if (FBS[AArch64::HasV8_1aOps])
3686 Str += "ARMv8.1a";
3687 else if (FBS[AArch64::HasV8_2aOps])
3688 Str += "ARMv8.2a";
3689 else if (FBS[AArch64::HasV8_3aOps])
3690 Str += "ARMv8.3a";
3691 else if (FBS[AArch64::HasV8_4aOps])
3692 Str += "ARMv8.4a";
3693 else if (FBS[AArch64::HasV8_5aOps])
3694 Str += "ARMv8.5a";
3695 else if (FBS[AArch64::HasV8_6aOps])
3696 Str += "ARMv8.6a";
3697 else if (FBS[AArch64::HasV8_7aOps])
3698 Str += "ARMv8.7a";
3699 else if (FBS[AArch64::HasV8_8aOps])
3700 Str += "ARMv8.8a";
3701 else if (FBS[AArch64::HasV8_9aOps])
3702 Str += "ARMv8.9a";
3703 else if (FBS[AArch64::HasV9_0aOps])
3704 Str += "ARMv9-a";
3705 else if (FBS[AArch64::HasV9_1aOps])
3706 Str += "ARMv9.1a";
3707 else if (FBS[AArch64::HasV9_2aOps])
3708 Str += "ARMv9.2a";
3709 else if (FBS[AArch64::HasV9_3aOps])
3710 Str += "ARMv9.3a";
3711 else if (FBS[AArch64::HasV9_4aOps])
3712 Str += "ARMv9.4a";
3713 else if (FBS[AArch64::HasV8_0rOps])
3714 Str += "ARMv8r";
3715 else {
3716 SmallVector<std::string, 2> ExtMatches;
3717 for (const auto& Ext : ExtensionMap) {
3718 // Use & in case multiple features are enabled
3719 if ((FBS & Ext.Features) != FeatureBitset())
3720 ExtMatches.push_back(Ext.Name);
3721 }
3722 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3723 }
3724}
3725
3726void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3727 SMLoc S) {
3728 const uint16_t Op2 = Encoding & 7;
3729 const uint16_t Cm = (Encoding & 0x78) >> 3;
3730 const uint16_t Cn = (Encoding & 0x780) >> 7;
3731 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3732
3733 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3734
3735 Operands.push_back(
3736 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3737 Operands.push_back(
3738 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3739 Operands.push_back(
3740 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3741 Expr = MCConstantExpr::create(Op2, getContext());
3742 Operands.push_back(
3743 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3744}
3745
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Also handles the cfp/dvp/cpp prediction-restriction aliases. Returns true
/// (with a diagnostic) on error.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias here is encoded as a plain SYS instruction.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      // Known operation, but unavailable on this subtarget: say what it needs.
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    // The prediction-restriction table only encodes the upper bits; the op2
    // field distinguishes the cfp/dvp/cpp variants.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Lex(); // Eat operand.

  // Operations whose name contains "all" act on everything and take no
  // register; every other operation requires one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3841
/// parseSyspAlias - The TLBIP instructions are simple aliases for
/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
///
/// Returns true (with a diagnostic) on error.
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sysp", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    // An "nXS" suffix selects the XS variant: strip it for the table lookup,
    // then fold it back into the encoding and feature requirements below.
    bool HasnXSQualifier = Op.endswith_insensitive("nXS");
    if (HasnXSQualifier) {
      Op = Op.drop_back(3);
    }
    const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBIorig)
      return TokError("invalid operand for TLBIP instruction");
    // Build an adjusted table entry: the nXS qualifier sets bit 7 of the
    // encoding and additionally requires FeatureXS.
    const AArch64TLBI::TLBI TLBI(
        TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
        TLBIorig->NeedsReg,
        HasnXSQualifier
            ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
            : TLBIorig->FeaturesRequired);
    if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
      std::string Name =
          std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
      std::string Str("TLBIP " + Name + " requires: ");
      setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI.Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  // TLBIP always takes a register pair: either the XZR pair or a sequential
  // GPR pair.
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("expected register identifier");
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result == MatchOperand_NoMatch)
    Result = tryParseGPRSeqPair(Operands);
  if (Result != MatchOperand_Success)
    return TokError("specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3900
/// Parse the operand of a barrier instruction (dsb/dmb/isb/tsb): either an
/// immediate in [0, 15] or a named barrier option. For "dsb", immediates
/// above 15 and unknown names return NoMatch so the nXS variant can try to
/// match instead.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the token so it can be pushed back for the nXS variant below.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Use the symbolic name for the encoding when one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
3973
3974OperandMatchResultTy
3975AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3976 const AsmToken &Tok = getTok();
3977
3978 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands")(static_cast <bool> (Mnemonic == "dsb" && "Instruction does not accept nXS operands"
) ? void (0) : __assert_fail ("Mnemonic == \"dsb\" && \"Instruction does not accept nXS operands\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3978
, __extension__ __PRETTY_FUNCTION__))
;
3979 if (Mnemonic != "dsb")
3980 return MatchOperand_ParseFail;
3981
3982 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3983 // Immediate operand.
3984 const MCExpr *ImmVal;
3985 SMLoc ExprLoc = getLoc();
3986 if (getParser().parseExpression(ImmVal))
3987 return MatchOperand_ParseFail;
3988 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3989 if (!MCE) {
3990 Error(ExprLoc, "immediate value expected for barrier operand");
3991 return MatchOperand_ParseFail;
3992 }
3993 int64_t Value = MCE->getValue();
3994 // v8.7-A DSB in the nXS variant accepts only the following immediate
3995 // values: 16, 20, 24, 28.
3996 if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3997 Error(ExprLoc, "barrier operand out of range");
3998 return MatchOperand_ParseFail;
3999 }
4000 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4001 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4002 ExprLoc, getContext(),
4003 true /*hasnXSModifier*/));
4004 return MatchOperand_Success;
4005 }
4006
4007 if (Tok.isNot(AsmToken::Identifier)) {
4008 TokError("invalid operand for instruction");
4009 return MatchOperand_ParseFail;
4010 }
4011
4012 StringRef Operand = Tok.getString();
4013 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4014
4015 if (!DB) {
4016 TokError("invalid barrier option name");
4017 return MatchOperand_ParseFail;
4018 }
4019
4020 Operands.push_back(
4021 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4022 getContext(), true /*hasnXSModifier*/));
4023 Lex(); // Consume the option
4024
4025 return MatchOperand_Success;
4026}
4027
4028OperandMatchResultTy
4029AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4030 const AsmToken &Tok = getTok();
4031
4032 if (Tok.isNot(AsmToken::Identifier))
4033 return MatchOperand_NoMatch;
4034
4035 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4036 return MatchOperand_NoMatch;
4037
4038 int MRSReg, MSRReg;
4039 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4040 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4041 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4042 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4043 } else
4044 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4045
4046 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
4047 unsigned PStateImm = -1;
4048 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
4049 PStateImm = PState->Encoding;
4050
4051 Operands.push_back(
4052 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4053 PStateImm, getContext()));
4054 Lex(); // Eat identifier
4055
4056 return MatchOperand_Success;
4057}
4058
4059/// tryParseNeonVectorRegister - Parse a vector register operand.
4060bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4061 if (getTok().isNot(AsmToken::Identifier))
4062 return true;
4063
4064 SMLoc S = getLoc();
4065 // Check for a vector register specifier first.
4066 StringRef Kind;
4067 unsigned Reg;
4068 OperandMatchResultTy Res =
4069 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4070 if (Res != MatchOperand_Success)
4071 return true;
4072
4073 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4074 if (!KindRes)
4075 return true;
4076
4077 unsigned ElementWidth = KindRes->second;
4078 Operands.push_back(
4079 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4080 S, getLoc(), getContext()));
4081
4082 // If there was an explicit qualifier, that goes on as a literal text
4083 // operand.
4084 if (!Kind.empty())
4085 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4086
4087 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4088}
4089
4090OperandMatchResultTy
4091AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4092 SMLoc SIdx = getLoc();
4093 if (parseOptionalToken(AsmToken::LBrac)) {
4094 const MCExpr *ImmVal;
4095 if (getParser().parseExpression(ImmVal))
4096 return MatchOperand_NoMatch;
4097 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4098 if (!MCE) {
4099 TokError("immediate value expected for vector index");
4100 return MatchOperand_ParseFail;;
4101 }
4102
4103 SMLoc E = getLoc();
4104
4105 if (parseToken(AsmToken::RBrac, "']' expected"))
4106 return MatchOperand_ParseFail;;
4107
4108 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4109 E, getContext()));
4110 return MatchOperand_Success;
4111 }
4112
4113 return MatchOperand_NoMatch;
4114}
4115
4116// tryParseVectorRegister - Try to parse a vector register name with
4117// optional kind specifier. If it is a register specifier, eat the token
4118// and return it.
4119OperandMatchResultTy
4120AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
4121 RegKind MatchKind) {
4122 const AsmToken &Tok = getTok();
4123
4124 if (Tok.isNot(AsmToken::Identifier))
4125 return MatchOperand_NoMatch;
4126
4127 StringRef Name = Tok.getString();
4128 // If there is a kind specifier, it's separated from the register name by
4129 // a '.'.
4130 size_t Start = 0, Next = Name.find('.');
4131 StringRef Head = Name.slice(Start, Next);
4132 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4133
4134 if (RegNum) {
4135 if (Next != StringRef::npos) {
4136 Kind = Name.slice(Next, StringRef::npos);
4137 if (!isValidVectorKind(Kind, MatchKind)) {
4138 TokError("invalid vector kind qualifier");
4139 return MatchOperand_ParseFail;
4140 }
4141 }
4142 Lex(); // Eat the register token.
4143
4144 Reg = RegNum;
4145 return MatchOperand_Success;
4146 }
4147
4148 return MatchOperand_NoMatch;
4149}
4150
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
///
/// Handles an optional vector index (or trailing operand for indexed
/// predicates) and an optional "/z" or "/m" predication qualifier.
template <RegKind RK> OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      OperandMatchResultTy ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex == MatchOperand_Success)
        return MatchOperand_Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  // Predicate-as-counter registers only allow zeroing predication.
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z") {
    Error(getLoc(), "expecting 'z' predication");
    return MatchOperand_ParseFail;
  }

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
4218
4219/// parseRegister - Parse a register operand.
4220bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4221 // Try for a Neon vector register.
4222 if (!tryParseNeonVectorRegister(Operands))
4223 return false;
4224
4225 if (tryParseZTOperand(Operands) == MatchOperand_Success)
4226 return false;
4227
4228 // Otherwise try for a scalar register.
4229 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
4230 return false;
4231
4232 return true;
4233}
4234
/// Parse an immediate expression that may be prefixed by an ELF relocation
/// specifier of the form ":<specifier>:" (e.g. ":lo12:symbol"). On success,
/// \p ImmVal holds the parsed expression, wrapped in an AArch64MCExpr when a
/// specifier was present. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  // Only meaningful when HasELFModifier is set below.
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifiers are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Attach the relocation specifier to the parsed expression.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
4312
/// Parse an SME matrix tile list operand of the form "{}", "{za}" or
/// "{za0.d, za1.d, ...}", producing a MatrixTileList operand whose register
/// mask covers the ZA double-word tiles aliased by each listed tile.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Parse a single "za<N>.<size>" tile, returning its register and element
  // width. Consumes the token only on success.
  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const Optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  // Keep the '{' so it can be pushed back if this turns out not to be a tile
  // list.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    // "{za}" selects all eight ZAD tiles.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    // Put the '{' back so other list-operand parsers can try.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD tiles aliased by every tile seen so far.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Out-of-order and duplicate tiles are only warnings, not errors.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Fold the aliased ZAD tiles into a bit mask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
4421
/// Parse a vector register list enclosed in curly braces, e.g.
/// "{ v0.8b, v1.8b }" or "{ z0.s - z3.s }". The register class is selected
/// by the \p VectorKind template parameter. When \p ExpectMatch is false, a
/// non-matching list is not diagnosed, so other list flavours (SVE vs Neon)
/// can be tried on the same tokens.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  // Vector lists always start with '{'.
  if (!getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" is handled by a different operand parser; report a clean
    // no-match so that parser gets its chance.
    if (RegTok.is(AsmToken::Identifier) && ParseRes == MatchOperand_NoMatch &&
        RegTok.getString().equals_insensitive("zt0"))
      return MatchOperand_NoMatch;

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  int NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  int Stride = 1;
  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{ vA.T - vB.T }".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Number of registers spanned by the range, with wraparound past the
    // last register of the class.
    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{ vA.T, vB.T, ... }". The stride between the
    // first two registers fixes the stride for the whole list.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (RegVal + NumRegs - PrevRegVal);
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs)) {
        Error(Loc, "registers must have the same sequential stride");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  // Decode the element layout ("8b", "s", ...) if a suffix was present.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
4559
4560/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4561bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4562 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4563 if (ParseRes != MatchOperand_Success)
4564 return true;
4565
4566 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4567}
4568
4569OperandMatchResultTy
4570AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4571 SMLoc StartLoc = getLoc();
4572
4573 unsigned RegNum;
4574 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4575 if (Res != MatchOperand_Success)
4576 return Res;
4577
4578 if (!parseOptionalToken(AsmToken::Comma)) {
4579 Operands.push_back(AArch64Operand::CreateReg(
4580 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4581 return MatchOperand_Success;
4582 }
4583
4584 parseOptionalToken(AsmToken::Hash);
4585
4586 if (getTok().isNot(AsmToken::Integer)) {
4587 Error(getLoc(), "index must be absent or #0");
4588 return MatchOperand_ParseFail;
4589 }
4590
4591 const MCExpr *ImmVal;
4592 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4593 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4594 Error(getLoc(), "index must be absent or #0");
4595 return MatchOperand_ParseFail;
4596 }
4597
4598 Operands.push_back(AArch64Operand::CreateReg(
4599 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4600 return MatchOperand_Success;
4601}
4602
// Parse a lookup-table register (ZT0) operand, optionally followed by an
// immediate index in square brackets.
OperandMatchResultTy
AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (RegNum == 0)
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat identifier token.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    const MCExpr *ImmVal;
    // NOTE(review): returning NoMatch here after '[' (and the register)
    // have already been consumed leaves tokens eaten on a "no match" path;
    // MatchOperand_ParseFail looks more appropriate — confirm against the
    // custom-parser contract before changing.
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
  }

  return MatchOperand_Success;
}
4638
// Parse a scalar GPR operand, optionally followed by a shift/extend
// modifier when ParseShiftExtend is set. EqTy records how this register is
// compared against a tied operand during matching (exact, sub- or
// super-register).
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into a single register operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
4673
4674bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4675 MCAsmParser &Parser = getParser();
4676
4677 // Some SVE instructions have a decoration after the immediate, i.e.
4678 // "mul vl". We parse them here and add tokens, which must be present in the
4679 // asm string in the tablegen instruction.
4680 bool NextIsVL =
4681 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4682 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4683 if (!getTok().getString().equals_insensitive("mul") ||
4684 !(NextIsVL || NextIsHash))
4685 return true;
4686
4687 Operands.push_back(
4688 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4689 Lex(); // Eat the "mul"
4690
4691 if (NextIsVL) {
4692 Operands.push_back(
4693 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4694 Lex(); // Eat the "vl"
4695 return false;
4696 }
4697
4698 if (NextIsHash) {
4699 Lex(); // Eat the #
4700 SMLoc S = getLoc();
4701
4702 // Parse immediate operand.
4703 const MCExpr *ImmVal;
4704 if (!Parser.parseExpression(ImmVal))
4705 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4706 Operands.push_back(AArch64Operand::CreateImm(
4707 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4708 getContext()));
4709 return MatchOperand_Success;
4710 }
4711 }
4712
4713 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4714}
4715
4716bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4717 StringRef &VecGroup) {
4718 MCAsmParser &Parser = getParser();
4719 auto Tok = Parser.getTok();
4720 if (Tok.isNot(AsmToken::Identifier))
4721 return true;
4722
4723 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4724 .Case("vgx2", "vgx2")
4725 .Case("vgx4", "vgx4")
4726 .Default("");
4727
4728 if (VG.empty())
4729 return true;
4730
4731 VecGroup = VG;
4732 Parser.Lex(); // Eat vgx[2|4]
4733 return false;
4734}
4735
4736bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4737 auto Tok = getTok();
4738 if (Tok.isNot(AsmToken::Identifier))
4739 return true;
4740
4741 auto Keyword = Tok.getString();
4742 Keyword = StringSwitch<StringRef>(Keyword.lower())
4743 .Case("sm", "sm")
4744 .Case("za", "za")
4745 .Default(Keyword);
4746 Operands.push_back(
4747 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4748
4749 Lex();
4750 return false;
4751}
4752
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
///
/// \p isCondCode requests condition-code parsing for identifier operands;
/// \p invertCondCode inverts the parsed condition (used by csel-style
/// aliases). Returns true on error.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything unrecognised is treated as a symbolic immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // Prefer a Neon vector list; fall back to a raw '{' token (used by SME
    // ZA tile groups, closed by the '}' handling in ParseInstruction).
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is an "smstart" or "smstop" instruction, parse its special
    // keyword operand as an identifier.
    if (Mnemonic == "smstart" || Mnemonic == "smstop")
      return parseKeywordOperand(Operands);

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching the asm strings of
      // the fcmp-family instruction definitions.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "=<expr>" form of the ldr pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize the immediate into a 16-bit chunk plus an LSL amount.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4948
4949bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4950 const MCExpr *Expr = nullptr;
4951 SMLoc L = getLoc();
4952 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4953 return true;
4954 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4955 if (check(!Value, L, "expected constant expression"))
4956 return true;
4957 Out = Value->getValue();
4958 return false;
4959}
4960
4961bool AArch64AsmParser::parseComma() {
4962 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4963 return true;
4964 // Eat the comma
4965 Lex();
4966 return false;
4967}
4968
// Parse a register and translate it into an index relative to \p Base,
// diagnosing registers outside [First, Last]. For X-register ranges, FP and
// LR are accepted as spellings of indices 29 and 30 even though they are
// not contiguous with x0..x28 in the register enum.
bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
                                            unsigned First, unsigned Last) {
  unsigned Reg;
  SMLoc Start, End;
  if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
    return true;

  // Special handling for FP and LR; they aren't linearly after x28 in
  // the registers enum.
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      // Range ends at FP (index 29): accept FP by name, cap the linear
      // check at x28.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      }
    }
    if (Last == AArch64::LR) {
      // Range ends at LR (index 30): accept both FP and LR by name.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      } else if (Reg == AArch64::LR) {
        Out = 30;
        return false;
      }
    }
  }

  if (check(Reg < First || Reg > RangeEnd, Start,
            Twine("expected register in range ") +
                AArch64InstPrinter::getRegisterName(First) + " to " +
                AArch64InstPrinter::getRegisterName(Last)))
    return true;
  Out = Reg - Base;
  return false;
}
5007
// Decide whether two parsed operands name the same register(s), honouring
// each operand's register-equality constraint (exact, sub- or
// super-register).
bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
                                    const MCParsedAsmOperand &Op2) const {
  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);

  // Vector lists compare equal when they describe the same run of
  // registers: same start, same length, same stride.
  if (AOp1.isVectorList() && AOp2.isVectorList())
    return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
           AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
           AOp1.getVectorListStride() == AOp2.getVectorListStride();

  if (!AOp1.isReg() || !AOp2.isReg())
    return false;

  // Plain register equality is delegated to the generic implementation.
  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
    return MCTargetAsmParser::areEqualRegs(Op1, Op2);

  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
         "Testing equality of non-scalar registers not supported");

  // Check if a registers match their sub/super register classes.
  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
    return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
  if (AOp1.getRegEqualityTy() == EqualsSubReg)
    return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
    return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
  if (AOp2.getRegEqualityTy() == EqualsSubReg)
    return getWRegFromXReg(Op2.getReg()) == Op1.getReg();

  return false;
}
5040
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize legacy one-word conditional-branch spellings ("beq") to
  // the dotted form ("b.eq") before splitting the mnemonic.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. The operand position N selects
      // whether condition-code parsing applies for this mnemonic.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //  '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5189
5190static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
5191 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(static_cast <bool> ((ZReg >= AArch64::Z0) &&
(ZReg <= AArch64::Z31)) ? void (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5191
, __extension__ __PRETTY_FUNCTION__))
;
5192 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5193 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5194 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5195 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5196 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5197 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5198}
5199
5200// FIXME: This entire function is a giant hack to provide us with decent
5201// operand range validation/diagnostics until TableGen/MC can be extended
5202// to support autogeneration of this kind of validation.
5203bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5204 SmallVectorImpl<SMLoc> &Loc) {
5205 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5206 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5207
5208 // A prefix only applies to the instruction following it. Here we extract
5209 // prefix information for the next instruction before validating the current
5210 // one so that in the case of failure we don't erronously continue using the
5211 // current prefix.
5212 PrefixInfo Prefix = NextPrefix;
5213 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5214
5215 // Before validating the instruction in isolation we run through the rules
5216 // applicable when it follows a prefix instruction.
5217 // NOTE: brk & hlt can be prefixed but require no additional validation.
5218 if (Prefix.isActive() &&
5219 (Inst.getOpcode() != AArch64::BRK) &&
5220 (Inst.getOpcode() != AArch64::HLT)) {
5221
5222 // Prefixed intructions must have a destructive operand.
5223 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5224 AArch64::NotDestructive)
5225 return Error(IDLoc, "instruction is unpredictable when following a"
5226 " movprfx, suggest replacing movprfx with mov");
5227
5228 // Destination operands must match.
5229 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5230 return Error(Loc[0], "instruction is unpredictable when following a"
5231 " movprfx writing to a different destination");
5232
5233 // Destination operand must not be used in any other location.
5234 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5235 if (Inst.getOperand(i).isReg() &&
5236 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5237 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5238 return Error(Loc[0], "instruction is unpredictable when following a"
5239 " movprfx and destination also used as non-destructive"
5240 " source");
5241 }
5242
5243 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5244 if (Prefix.isPredicated()) {
5245 int PgIdx = -1;
5246
5247 // Find the instructions general predicate.
5248 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5249 if (Inst.getOperand(i).isReg() &&
5250 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5251 PgIdx = i;
5252 break;
5253 }
5254
5255 // Instruction must be predicated if the movprfx is predicated.
5256 if (PgIdx == -1 ||
5257 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5258 return Error(IDLoc, "instruction is unpredictable when following a"
5259 " predicated movprfx, suggest using unpredicated movprfx");
5260
5261 // Instruction must use same general predicate as the movprfx.
5262 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5263 return Error(IDLoc, "instruction is unpredictable when following a"
5264 " predicated movprfx using a different general predicate");
5265
5266 // Instruction element type must match the movprfx.
5267 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5268 return Error(IDLoc, "instruction is unpredictable when following a"
5269 " predicated movprfx with a different element size");
5270 }
5271 }
5272
5273 // Check for indexed addressing modes w/ the base register being the
5274 // same as a destination/source register or pair load where
5275 // the Rt == Rt2. All of those are undefined behaviour.
5276 switch (Inst.getOpcode()) {
5277 case AArch64::LDPSWpre:
5278 case AArch64::LDPWpost:
5279 case AArch64::LDPWpre:
5280 case AArch64::LDPXpost:
5281 case AArch64::LDPXpre: {
5282 unsigned Rt = Inst.getOperand(1).getReg();
5283 unsigned Rt2 = Inst.getOperand(2).getReg();
5284 unsigned Rn = Inst.getOperand(3).getReg();
5285 if (RI->isSubRegisterEq(Rn, Rt))
5286 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5287 "is also a destination");
5288 if (RI->isSubRegisterEq(Rn, Rt2))
5289 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5290 "is also a destination");
5291 [[fallthrough]];
5292 }
5293 case AArch64::LDPDi:
5294 case AArch64::LDPQi:
5295 case AArch64::LDPSi:
5296 case AArch64::LDPSWi:
5297 case AArch64::LDPWi:
5298 case AArch64::LDPXi: {
5299 unsigned Rt = Inst.getOperand(0).getReg();
5300 unsigned Rt2 = Inst.getOperand(1).getReg();
5301 if (Rt == Rt2)
5302 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5303 break;
5304 }
5305 case AArch64::LDPDpost:
5306 case AArch64::LDPDpre:
5307 case AArch64::LDPQpost:
5308 case AArch64::LDPQpre:
5309 case AArch64::LDPSpost:
5310 case AArch64::LDPSpre:
5311 case AArch64::LDPSWpost: {
5312 unsigned Rt = Inst.getOperand(1).getReg();
5313 unsigned Rt2 = Inst.getOperand(2).getReg();
5314 if (Rt == Rt2)
5315 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5316 break;
5317 }
5318 case AArch64::STPDpost:
5319 case AArch64::STPDpre:
5320 case AArch64::STPQpost:
5321 case AArch64::STPQpre:
5322 case AArch64::STPSpost:
5323 case AArch64::STPSpre:
5324 case AArch64::STPWpost:
5325 case AArch64::STPWpre:
5326 case AArch64::STPXpost:
5327 case AArch64::STPXpre: {
5328 unsigned Rt = Inst.getOperand(1).getReg();
5329 unsigned Rt2 = Inst.getOperand(2).getReg();
5330 unsigned Rn = Inst.getOperand(3).getReg();
5331 if (RI->isSubRegisterEq(Rn, Rt))
5332 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5333 "is also a source");
5334 if (RI->isSubRegisterEq(Rn, Rt2))
5335 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5336 "is also a source");
5337 break;
5338 }
5339 case AArch64::LDRBBpre:
5340 case AArch64::LDRBpre:
5341 case AArch64::LDRHHpre:
5342 case AArch64::LDRHpre:
5343 case AArch64::LDRSBWpre:
5344 case AArch64::LDRSBXpre:
5345 case AArch64::LDRSHWpre:
5346 case AArch64::LDRSHXpre:
5347 case AArch64::LDRSWpre:
5348 case AArch64::LDRWpre:
5349 case AArch64::LDRXpre:
5350 case AArch64::LDRBBpost:
5351 case AArch64::LDRBpost:
5352 case AArch64::LDRHHpost:
5353 case AArch64::LDRHpost:
5354 case AArch64::LDRSBWpost:
5355 case AArch64::LDRSBXpost:
5356 case AArch64::LDRSHWpost:
5357 case AArch64::LDRSHXpost:
5358 case AArch64::LDRSWpost:
5359 case AArch64::LDRWpost:
5360 case AArch64::LDRXpost: {
5361 unsigned Rt = Inst.getOperand(1).getReg();
5362 unsigned Rn = Inst.getOperand(2).getReg();
5363 if (RI->isSubRegisterEq(Rn, Rt))
5364 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5365 "is also a source");
5366 break;
5367 }
5368 case AArch64::STRBBpost:
5369 case AArch64::STRBpost:
5370 case AArch64::STRHHpost:
5371 case AArch64::STRHpost:
5372 case AArch64::STRWpost:
5373 case AArch64::STRXpost:
5374 case AArch64::STRBBpre:
5375 case AArch64::STRBpre:
5376 case AArch64::STRHHpre:
5377 case AArch64::STRHpre:
5378 case AArch64::STRWpre:
5379 case AArch64::STRXpre: {
5380 unsigned Rt = Inst.getOperand(1).getReg();
5381 unsigned Rn = Inst.getOperand(2).getReg();
5382 if (RI->isSubRegisterEq(Rn, Rt))
5383 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5384 "is also a source");
5385 break;
5386 }
5387 case AArch64::STXRB:
5388 case AArch64::STXRH:
5389 case AArch64::STXRW:
5390 case AArch64::STXRX:
5391 case AArch64::STLXRB:
5392 case AArch64::STLXRH:
5393 case AArch64::STLXRW:
5394 case AArch64::STLXRX: {
5395 unsigned Rs = Inst.getOperand(0).getReg();
5396 unsigned Rt = Inst.getOperand(1).getReg();
5397 unsigned Rn = Inst.getOperand(2).getReg();
5398 if (RI->isSubRegisterEq(Rt, Rs) ||
5399 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5400 return Error(Loc[0],
5401 "unpredictable STXR instruction, status is also a source");
5402 break;
5403 }
5404 case AArch64::STXPW:
5405 case AArch64::STXPX:
5406 case AArch64::STLXPW:
5407 case AArch64::STLXPX: {
5408 unsigned Rs = Inst.getOperand(0).getReg();
5409 unsigned Rt1 = Inst.getOperand(1).getReg();
5410 unsigned Rt2 = Inst.getOperand(2).getReg();
5411 unsigned Rn = Inst.getOperand(3).getReg();
5412 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5413 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5414 return Error(Loc[0],
5415 "unpredictable STXP instruction, status is also a source");
5416 break;
5417 }
5418 case AArch64::LDRABwriteback:
5419 case AArch64::LDRAAwriteback: {
5420 unsigned Xt = Inst.getOperand(0).getReg();
5421 unsigned Xn = Inst.getOperand(1).getReg();
5422 if (Xt == Xn)
5423 return Error(Loc[0],
5424 "unpredictable LDRA instruction, writeback base"
5425 " is also a destination");
5426 break;
5427 }
5428 }
5429
5430 // Check v8.8-A memops instructions.
5431 switch (Inst.getOpcode()) {
5432 case AArch64::CPYFP:
5433 case AArch64::CPYFPWN:
5434 case AArch64::CPYFPRN:
5435 case AArch64::CPYFPN:
5436 case AArch64::CPYFPWT:
5437 case AArch64::CPYFPWTWN:
5438 case AArch64::CPYFPWTRN:
5439 case AArch64::CPYFPWTN:
5440 case AArch64::CPYFPRT:
5441 case AArch64::CPYFPRTWN:
5442 case AArch64::CPYFPRTRN:
5443 case AArch64::CPYFPRTN:
5444 case AArch64::CPYFPT:
5445 case AArch64::CPYFPTWN:
5446 case AArch64::CPYFPTRN:
5447 case AArch64::CPYFPTN:
5448 case AArch64::CPYFM:
5449 case AArch64::CPYFMWN:
5450 case AArch64::CPYFMRN:
5451 case AArch64::CPYFMN:
5452 case AArch64::CPYFMWT:
5453 case AArch64::CPYFMWTWN:
5454 case AArch64::CPYFMWTRN:
5455 case AArch64::CPYFMWTN:
5456 case AArch64::CPYFMRT:
5457 case AArch64::CPYFMRTWN:
5458 case AArch64::CPYFMRTRN:
5459 case AArch64::CPYFMRTN:
5460 case AArch64::CPYFMT:
5461 case AArch64::CPYFMTWN:
5462 case AArch64::CPYFMTRN:
5463 case AArch64::CPYFMTN:
5464 case AArch64::CPYFE:
5465 case AArch64::CPYFEWN:
5466 case AArch64::CPYFERN:
5467 case AArch64::CPYFEN:
5468 case AArch64::CPYFEWT:
5469 case AArch64::CPYFEWTWN:
5470 case AArch64::CPYFEWTRN:
5471 case AArch64::CPYFEWTN:
5472 case AArch64::CPYFERT:
5473 case AArch64::CPYFERTWN:
5474 case AArch64::CPYFERTRN:
5475 case AArch64::CPYFERTN:
5476 case AArch64::CPYFET:
5477 case AArch64::CPYFETWN:
5478 case AArch64::CPYFETRN:
5479 case AArch64::CPYFETN:
5480 case AArch64::CPYP:
5481 case AArch64::CPYPWN:
5482 case AArch64::CPYPRN:
5483 case AArch64::CPYPN:
5484 case AArch64::CPYPWT:
5485 case AArch64::CPYPWTWN:
5486 case AArch64::CPYPWTRN:
5487 case AArch64::CPYPWTN:
5488 case AArch64::CPYPRT:
5489 case AArch64::CPYPRTWN:
5490 case AArch64::CPYPRTRN:
5491 case AArch64::CPYPRTN:
5492 case AArch64::CPYPT:
5493 case AArch64::CPYPTWN:
5494 case AArch64::CPYPTRN:
5495 case AArch64::CPYPTN:
5496 case AArch64::CPYM:
5497 case AArch64::CPYMWN:
5498 case AArch64::CPYMRN:
5499 case AArch64::CPYMN:
5500 case AArch64::CPYMWT:
5501 case AArch64::CPYMWTWN:
5502 case AArch64::CPYMWTRN:
5503 case AArch64::CPYMWTN:
5504 case AArch64::CPYMRT:
5505 case AArch64::CPYMRTWN:
5506 case AArch64::CPYMRTRN:
5507 case AArch64::CPYMRTN:
5508 case AArch64::CPYMT:
5509 case AArch64::CPYMTWN:
5510 case AArch64::CPYMTRN:
5511 case AArch64::CPYMTN:
5512 case AArch64::CPYE:
5513 case AArch64::CPYEWN:
5514 case AArch64::CPYERN:
5515 case AArch64::CPYEN:
5516 case AArch64::CPYEWT:
5517 case AArch64::CPYEWTWN:
5518 case AArch64::CPYEWTRN:
5519 case AArch64::CPYEWTN:
5520 case AArch64::CPYERT:
5521 case AArch64::CPYERTWN:
5522 case AArch64::CPYERTRN:
5523 case AArch64::CPYERTN:
5524 case AArch64::CPYET:
5525 case AArch64::CPYETWN:
5526 case AArch64::CPYETRN:
5527 case AArch64::CPYETN: {
5528 unsigned Xd_wb = Inst.getOperand(0).getReg();
5529 unsigned Xs_wb = Inst.getOperand(1).getReg();
5530 unsigned Xn_wb = Inst.getOperand(2).getReg();
5531 unsigned Xd = Inst.getOperand(3).getReg();
5532 unsigned Xs = Inst.getOperand(4).getReg();
5533 unsigned Xn = Inst.getOperand(5).getReg();
5534 if (Xd_wb != Xd)
5535 return Error(Loc[0],
5536 "invalid CPY instruction, Xd_wb and Xd do not match");
5537 if (Xs_wb != Xs)
5538 return Error(Loc[0],
5539 "invalid CPY instruction, Xs_wb and Xs do not match");
5540 if (Xn_wb != Xn)
5541 return Error(Loc[0],
5542 "invalid CPY instruction, Xn_wb and Xn do not match");
5543 if (Xd == Xs)
5544 return Error(Loc[0], "invalid CPY instruction, destination and source"
5545 " registers are the same");
5546 if (Xd == Xn)
5547 return Error(Loc[0], "invalid CPY instruction, destination and size"
5548 " registers are the same");
5549 if (Xs == Xn)
5550 return Error(Loc[0], "invalid CPY instruction, source and size"
5551 " registers are the same");
5552 break;
5553 }
5554 case AArch64::SETP:
5555 case AArch64::SETPT:
5556 case AArch64::SETPN:
5557 case AArch64::SETPTN:
5558 case AArch64::SETM:
5559 case AArch64::SETMT:
5560 case AArch64::SETMN:
5561 case AArch64::SETMTN:
5562 case AArch64::SETE:
5563 case AArch64::SETET:
5564 case AArch64::SETEN:
5565 case AArch64::SETETN:
5566 case AArch64::SETGP:
5567 case AArch64::SETGPT:
5568 case AArch64::SETGPN:
5569 case AArch64::SETGPTN:
5570 case AArch64::SETGM:
5571 case AArch64::SETGMT:
5572 case AArch64::SETGMN:
5573 case AArch64::SETGMTN:
5574 case AArch64::MOPSSETGE:
5575 case AArch64::MOPSSETGET:
5576 case AArch64::MOPSSETGEN:
5577 case AArch64::MOPSSETGETN: {
5578 unsigned Xd_wb = Inst.getOperand(0).getReg();
5579 unsigned Xn_wb = Inst.getOperand(1).getReg();
5580 unsigned Xd = Inst.getOperand(2).getReg();
5581 unsigned Xn = Inst.getOperand(3).getReg();
5582 unsigned Xm = Inst.getOperand(4).getReg();
5583 if (Xd_wb != Xd)
5584 return Error(Loc[0],
5585 "invalid SET instruction, Xd_wb and Xd do not match");
5586 if (Xn_wb != Xn)
5587 return Error(Loc[0],
5588 "invalid SET instruction, Xn_wb and Xn do not match");
5589 if (Xd == Xn)
5590 return Error(Loc[0], "invalid SET instruction, destination and size"
5591 " registers are the same");
5592 if (Xd == Xm)
5593 return Error(Loc[0], "invalid SET instruction, destination and source"
5594 " registers are the same");
5595 if (Xn == Xm)
5596 return Error(Loc[0], "invalid SET instruction, source and size"
5597 " registers are the same");
5598 break;
5599 }
5600 }
5601
5602 // Now check immediate ranges. Separate from the above as there is overlap
5603 // in the instructions being checked and this keeps the nested conditionals
5604 // to a minimum.
5605 switch (Inst.getOpcode()) {
5606 case AArch64::ADDSWri:
5607 case AArch64::ADDSXri:
5608 case AArch64::ADDWri:
5609 case AArch64::ADDXri:
5610 case AArch64::SUBSWri:
5611 case AArch64::SUBSXri:
5612 case AArch64::SUBWri:
5613 case AArch64::SUBXri: {
5614 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5615 // some slight duplication here.
5616 if (Inst.getOperand(2).isExpr()) {
5617 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5618 AArch64MCExpr::VariantKind ELFRefKind;
5619 MCSymbolRefExpr::VariantKind DarwinRefKind;
5620 int64_t Addend;
5621 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5622
5623 // Only allow these with ADDXri.
5624 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5625 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5626 Inst.getOpcode() == AArch64::ADDXri)
5627 return false;
5628
5629 // Only allow these with ADDXri/ADDWri
5630 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5631 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5632 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5633 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5634 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5635 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5636 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5637 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5638 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5639 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5640 (Inst.getOpcode() == AArch64::ADDXri ||
5641 Inst.getOpcode() == AArch64::ADDWri))
5642 return false;
5643
5644 // Don't allow symbol refs in the immediate field otherwise
5645 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5646 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5647 // 'cmp w0, 'borked')
5648 return Error(Loc.back(), "invalid immediate expression");
5649 }
5650 // We don't validate more complex expressions here
5651 }
5652 return false;
5653 }
5654 default:
5655 return false;
5656 }
5657}
5658
// Forward declaration of the mnemonic spell checker used when emitting
// "unrecognized instruction" diagnostics to suggest a close, valid mnemonic
// for the currently enabled feature set (FBS). Presumably the definition is
// emitted later in this file by TableGen's AsmMatcher
// (GET_MNEMONIC_SPELL_CHECKER) -- TODO confirm against the generated .inc.
// VariantID selects the assembly syntax variant (0 = default).
5659static std::string AArch64MnemonicSpellCheck(StringRef S,
5660 const FeatureBitset &FBS,
5661 unsigned VariantID = 0);
5662
5663bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5664 uint64_t ErrorInfo,
5665 OperandVector &Operands) {
5666 switch (ErrCode) {
5667 case Match_InvalidTiedOperand: {
5668 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5669 if (Op.isVectorList())
5670 return Error(Loc, "operand must match destination register list");
5671
5672 assert(Op.isReg() && "Unexpected operand type")(static_cast <bool> (Op.isReg() && "Unexpected operand type"
) ? void (0) : __assert_fail ("Op.isReg() && \"Unexpected operand type\""
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5672
, __extension__ __PRETTY_FUNCTION__))
;
5673 switch (Op.getRegEqualityTy()) {
5674 case RegConstraintEqualityTy::EqualsSubReg:
5675 return Error(Loc, "operand must be 64-bit form of destination register");
5676 case RegConstraintEqualityTy::EqualsSuperReg:
5677 return Error(Loc, "operand must be 32-bit form of destination register");
5678 case RegConstraintEqualityTy::EqualsReg:
5679 return Error(Loc, "operand must match destination register");
5680 }
5681 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5681
)
;
5682 }
5683 case Match_MissingFeature:
5684 return Error(Loc,
5685 "instruction requires a CPU feature not currently enabled");
5686 case Match_InvalidOperand:
5687 return Error(Loc, "invalid operand for instruction");
5688 case Match_InvalidSuffix:
5689 return Error(Loc, "invalid type suffix for instruction");
5690 case Match_InvalidCondCode:
5691 return Error(Loc, "expected AArch64 condition code");
5692 case Match_AddSubRegExtendSmall:
5693 return Error(Loc,
5694 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5695 case Match_AddSubRegExtendLarge:
5696 return Error(Loc,
5697 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5698 case Match_AddSubSecondSource:
5699 return Error(Loc,
5700 "expected compatible register, symbol or integer in range [0, 4095]");
5701 case Match_LogicalSecondSource:
5702 return Error(Loc, "expected compatible register or logical immediate");
5703 case Match_InvalidMovImm32Shift:
5704 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5705 case Match_InvalidMovImm64Shift:
5706 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5707 case Match_AddSubRegShift32:
5708 return Error(Loc,
5709 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5710 case Match_AddSubRegShift64:
5711 return Error(Loc,
5712 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5713 case Match_InvalidFPImm:
5714 return Error(Loc,
5715 "expected compatible register or floating-point constant");
5716 case Match_InvalidMemoryIndexedSImm6:
5717 return Error(Loc, "index must be an integer in range [-32, 31].");
5718 case Match_InvalidMemoryIndexedSImm5:
5719 return Error(Loc, "index must be an integer in range [-16, 15].");
5720 case Match_InvalidMemoryIndexed1SImm4:
5721 return Error(Loc, "index must be an integer in range [-8, 7].");
5722 case Match_InvalidMemoryIndexed2SImm4:
5723 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5724 case Match_InvalidMemoryIndexed3SImm4:
5725 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5726 case Match_InvalidMemoryIndexed4SImm4:
5727 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5728 case Match_InvalidMemoryIndexed16SImm4:
5729 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5730 case Match_InvalidMemoryIndexed32SImm4:
5731 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5732 case Match_InvalidMemoryIndexed1SImm6:
5733 return Error(Loc, "index must be an integer in range [-32, 31].");
5734 case Match_InvalidMemoryIndexedSImm8:
5735 return Error(Loc, "index must be an integer in range [-128, 127].");
5736 case Match_InvalidMemoryIndexedSImm9:
5737 return Error(Loc, "index must be an integer in range [-256, 255].");
5738 case Match_InvalidMemoryIndexed16SImm9:
5739 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5740 case Match_InvalidMemoryIndexed8SImm10:
5741 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5742 case Match_InvalidMemoryIndexed4SImm7:
5743 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5744 case Match_InvalidMemoryIndexed8SImm7:
5745 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5746 case Match_InvalidMemoryIndexed16SImm7:
5747 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5748 case Match_InvalidMemoryIndexed8UImm5:
5749 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5750 case Match_InvalidMemoryIndexed8UImm3:
5751 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5752 case Match_InvalidMemoryIndexed4UImm5:
5753 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5754 case Match_InvalidMemoryIndexed2UImm5:
5755 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5756 case Match_InvalidMemoryIndexed8UImm6:
5757 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5758 case Match_InvalidMemoryIndexed16UImm6:
5759 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5760 case Match_InvalidMemoryIndexed4UImm6:
5761 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5762 case Match_InvalidMemoryIndexed2UImm6:
5763 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5764 case Match_InvalidMemoryIndexed1UImm6:
5765 return Error(Loc, "index must be in range [0, 63].");
5766 case Match_InvalidMemoryWExtend8:
5767 return Error(Loc,
5768 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5769 case Match_InvalidMemoryWExtend16:
5770 return Error(Loc,
5771 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5772 case Match_InvalidMemoryWExtend32:
5773 return Error(Loc,
5774 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5775 case Match_InvalidMemoryWExtend64:
5776 return Error(Loc,
5777 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5778 case Match_InvalidMemoryWExtend128:
5779 return Error(Loc,
5780 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5781 case Match_InvalidMemoryXExtend8:
5782 return Error(Loc,
5783 "expected 'lsl' or 'sxtx' with optional shift of #0");
5784 case Match_InvalidMemoryXExtend16:
5785 return Error(Loc,
5786 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5787 case Match_InvalidMemoryXExtend32:
5788 return Error(Loc,
5789 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5790 case Match_InvalidMemoryXExtend64:
5791 return Error(Loc,
5792 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5793 case Match_InvalidMemoryXExtend128:
5794 return Error(Loc,
5795 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5796 case Match_InvalidMemoryIndexed1:
5797 return Error(Loc, "index must be an integer in range [0, 4095].");
5798 case Match_InvalidMemoryIndexed2:
5799 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5800 case Match_InvalidMemoryIndexed4:
5801 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5802 case Match_InvalidMemoryIndexed8:
5803 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5804 case Match_InvalidMemoryIndexed16:
5805 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5806 case Match_InvalidImm0_0:
5807 return Error(Loc, "immediate must be 0.");
5808 case Match_InvalidImm0_1:
5809 return Error(Loc, "immediate must be an integer in range [0, 1].");
5810 case Match_InvalidImm0_3:
5811 return Error(Loc, "immediate must be an integer in range [0, 3].");
5812 case Match_InvalidImm0_7:
5813 return Error(Loc, "immediate must be an integer in range [0, 7].");
5814 case Match_InvalidImm0_15:
5815 return Error(Loc, "immediate must be an integer in range [0, 15].");
5816 case Match_InvalidImm0_31:
5817 return Error(Loc, "immediate must be an integer in range [0, 31].");
5818 case Match_InvalidImm0_63:
5819 return Error(Loc, "immediate must be an integer in range [0, 63].");
5820 case Match_InvalidImm0_127:
5821 return Error(Loc, "immediate must be an integer in range [0, 127].");
5822 case Match_InvalidImm0_255:
5823 return Error(Loc, "immediate must be an integer in range [0, 255].");
5824 case Match_InvalidImm0_65535:
5825 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5826 case Match_InvalidImm1_8:
5827 return Error(Loc, "immediate must be an integer in range [1, 8].");
5828 case Match_InvalidImm1_16:
5829 return Error(Loc, "immediate must be an integer in range [1, 16].");
5830 case Match_InvalidImm1_32: