Bug Summary

File: build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 4452, column 15
The left operand of '==' is a garbage value
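
This class of warning ("garbage value" as an operand of a binary operator) means the analyzer found a path on which the left-hand side of the '==' is read from storage that was never initialized. A minimal, hypothetical reproduction of the pattern follows; it is not the exact path traced through this file, though the uninitialized members of the PrefixInfo class below (e.g. ElementSize, which has no initializer) are the plausible source:

    struct S {
      bool Predicated = false;
      unsigned ElementSize; // no initializer, as in PrefixInfo::ElementSize
    };

    bool sameElementSize(const S &P, unsigned Expected) {
      // If no path assigned ElementSize before P reached this point, the
      // left operand of '==' is an uninitialized (garbage) value.
      return P.ElementSize == Expected;
    }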

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AArch64 -I lib/Target/AArch64 -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -I lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AArch64/AsmParser/.. -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 
-fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-10-03-140002-15933-1 -x c++ /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCExpr.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCLinkerOptimizationHint.h"
32#include "llvm/MC/MCObjectFileInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37#include "llvm/MC/MCParser/MCTargetAsmParser.h"
38#include "llvm/MC/MCRegisterInfo.h"
39#include "llvm/MC/MCStreamer.h"
40#include "llvm/MC/MCSubtargetInfo.h"
41#include "llvm/MC/MCSymbol.h"
42#include "llvm/MC/MCTargetOptions.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/MC/SubtargetFeature.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/Casting.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/AArch64TargetParser.h"
52#include "llvm/Support/TargetParser.h"
53#include "llvm/Support/raw_ostream.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
67enum class RegKind {
68 Scalar,
69 NeonVector,
70 SVEDataVector,
71 SVEPredicateVector,
72 Matrix
73};
74
75enum class MatrixKind { Array, Tile, Row, Col };
76
77enum RegConstraintEqualityTy {
78 EqualsReg,
79 EqualsSuperReg,
80 EqualsSubReg
81};
82
83class AArch64AsmParser : public MCTargetAsmParser {
84private:
85 StringRef Mnemonic; ///< Instruction mnemonic.
86
87 // Map of register aliases created via the .req directive.
88 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
89
90 class PrefixInfo {
91 public:
92 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
93 PrefixInfo Prefix;
94 switch (Inst.getOpcode()) {
95 case AArch64::MOVPRFX_ZZ:
96 Prefix.Active = true;
97 Prefix.Dst = Inst.getOperand(0).getReg();
98 break;
99 case AArch64::MOVPRFX_ZPmZ_B:
100 case AArch64::MOVPRFX_ZPmZ_H:
101 case AArch64::MOVPRFX_ZPmZ_S:
102 case AArch64::MOVPRFX_ZPmZ_D:
103 Prefix.Active = true;
104 Prefix.Predicated = true;
105 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
106 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
107 "No destructive element size set for movprfx");
108 Prefix.Dst = Inst.getOperand(0).getReg();
109 Prefix.Pg = Inst.getOperand(2).getReg();
110 break;
111 case AArch64::MOVPRFX_ZPzZ_B:
112 case AArch64::MOVPRFX_ZPzZ_H:
113 case AArch64::MOVPRFX_ZPzZ_S:
114 case AArch64::MOVPRFX_ZPzZ_D:
115 Prefix.Active = true;
116 Prefix.Predicated = true;
117 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
118 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
119 "No destructive element size set for movprfx");
120 Prefix.Dst = Inst.getOperand(0).getReg();
121 Prefix.Pg = Inst.getOperand(1).getReg();
122 break;
123 default:
124 break;
125 }
126
127 return Prefix;
128 }
129
130 PrefixInfo() = default;
131 bool isActive() const { return Active; }
132 bool isPredicated() const { return Predicated; }
133 unsigned getElementSize() const {
134 assert(Predicated);
135 return ElementSize;
136 }
137 unsigned getDstReg() const { return Dst; }
138 unsigned getPgReg() const {
139 assert(Predicated);
140 return Pg;
141 }
142
143 private:
144 bool Active = false;
145 bool Predicated = false;
146 unsigned ElementSize;
147 unsigned Dst;
148 unsigned Pg;
149 } NextPrefix;
150
151 AArch64TargetStreamer &getTargetStreamer() {
152 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
153 return static_cast<AArch64TargetStreamer &>(TS);
154 }
155
156 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
157
158 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
159 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
160 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
161 std::string &Suggestion);
162 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
163 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
164 bool parseRegister(OperandVector &Operands);
165 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
166 bool parseNeonVectorList(OperandVector &Operands);
167 bool parseOptionalMulOperand(OperandVector &Operands);
168 bool parseKeywordOperand(OperandVector &Operands);
169 bool parseOperand(OperandVector &Operands, bool isCondCode,
170 bool invertCondCode);
171 bool parseImmExpr(int64_t &Out);
172 bool parseComma();
173 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
174 unsigned Last);
175
176 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
177 OperandVector &Operands);
178
179 bool parseDirectiveArch(SMLoc L);
180 bool parseDirectiveArchExtension(SMLoc L);
181 bool parseDirectiveCPU(SMLoc L);
182 bool parseDirectiveInst(SMLoc L);
183
184 bool parseDirectiveTLSDescCall(SMLoc L);
185
186 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
187 bool parseDirectiveLtorg(SMLoc L);
188
189 bool parseDirectiveReq(StringRef Name, SMLoc L);
190 bool parseDirectiveUnreq(SMLoc L);
191 bool parseDirectiveCFINegateRAState();
192 bool parseDirectiveCFIBKeyFrame();
193 bool parseDirectiveCFIMTETaggedFrame();
194
195 bool parseDirectiveVariantPCS(SMLoc L);
196
197 bool parseDirectiveSEHAllocStack(SMLoc L);
198 bool parseDirectiveSEHPrologEnd(SMLoc L);
199 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
200 bool parseDirectiveSEHSaveFPLR(SMLoc L);
201 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
202 bool parseDirectiveSEHSaveReg(SMLoc L);
203 bool parseDirectiveSEHSaveRegX(SMLoc L);
204 bool parseDirectiveSEHSaveRegP(SMLoc L);
205 bool parseDirectiveSEHSaveRegPX(SMLoc L);
206 bool parseDirectiveSEHSaveLRPair(SMLoc L);
207 bool parseDirectiveSEHSaveFReg(SMLoc L);
208 bool parseDirectiveSEHSaveFRegX(SMLoc L);
209 bool parseDirectiveSEHSaveFRegP(SMLoc L);
210 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
211 bool parseDirectiveSEHSetFP(SMLoc L);
212 bool parseDirectiveSEHAddFP(SMLoc L);
213 bool parseDirectiveSEHNop(SMLoc L);
214 bool parseDirectiveSEHSaveNext(SMLoc L);
215 bool parseDirectiveSEHEpilogStart(SMLoc L);
216 bool parseDirectiveSEHEpilogEnd(SMLoc L);
217 bool parseDirectiveSEHTrapFrame(SMLoc L);
218 bool parseDirectiveSEHMachineFrame(SMLoc L);
219 bool parseDirectiveSEHContext(SMLoc L);
220 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
221
222 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
223 SmallVectorImpl<SMLoc> &Loc);
224 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
225 OperandVector &Operands, MCStreamer &Out,
226 uint64_t &ErrorInfo,
227 bool MatchingInlineAsm) override;
228/// @name Auto-generated Match Functions
229/// {
230
231#define GET_ASSEMBLER_HEADER
232#include "AArch64GenAsmMatcher.inc"
233
234 /// }
235
236 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
237 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
238 RegKind MatchKind);
239 OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
240 OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
241 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
242 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
243 OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
244 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
245 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
246 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
247 template <bool IsSVEPrefetch = false>
248 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
249 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
250 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
251 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
252 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
253 template<bool AddFPZeroAsLiteral>
254 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
255 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
256 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
257 bool tryParseNeonVectorRegister(OperandVector &Operands);
258 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
259 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
260 template <bool ParseShiftExtend,
261 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
262 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
263 template <bool ParseShiftExtend, bool ParseSuffix>
264 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
265 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
266 template <RegKind VectorKind>
267 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
268 bool ExpectMatch = false);
269 OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
270 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
271 OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
272
273public:
274 enum AArch64MatchResultTy {
275 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
276#define GET_OPERAND_DIAGNOSTIC_TYPES
277#include "AArch64GenAsmMatcher.inc"
278 };
279 bool IsILP32;
280
281 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
282 const MCInstrInfo &MII, const MCTargetOptions &Options)
283 : MCTargetAsmParser(Options, STI, MII) {
284 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
285 MCAsmParserExtension::Initialize(Parser);
286 MCStreamer &S = getParser().getStreamer();
287 if (S.getTargetStreamer() == nullptr)
288 new AArch64TargetStreamer(S);
289
290 // Alias .hword/.word/.[dx]word to the target-independent
291 // .2byte/.4byte/.8byte directives as they have the same form and
292 // semantics:
293 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
294 Parser.addAliasForDirective(".hword", ".2byte");
295 Parser.addAliasForDirective(".word", ".4byte");
296 Parser.addAliasForDirective(".dword", ".8byte");
297 Parser.addAliasForDirective(".xword", ".8byte");
298
299 // Initialize the set of available features.
300 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
301 }
302
303 bool regsEqual(const MCParsedAsmOperand &Op1,
304 const MCParsedAsmOperand &Op2) const override;
305 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
306 SMLoc NameLoc, OperandVector &Operands) override;
307 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
308 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
309 SMLoc &EndLoc) override;
310 bool ParseDirective(AsmToken DirectiveID) override;
311 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
312 unsigned Kind) override;
313
314 static bool classifySymbolRef(const MCExpr *Expr,
315 AArch64MCExpr::VariantKind &ELFRefKind,
316 MCSymbolRefExpr::VariantKind &DarwinRefKind,
317 int64_t &Addend);
318};
319
320/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
321/// instruction.
322class AArch64Operand : public MCParsedAsmOperand {
323private:
324 enum KindTy {
325 k_Immediate,
326 k_ShiftedImm,
327 k_CondCode,
328 k_Register,
329 k_MatrixRegister,
330 k_MatrixTileList,
331 k_SVCR,
332 k_VectorList,
333 k_VectorIndex,
334 k_Token,
335 k_SysReg,
336 k_SysCR,
337 k_Prefetch,
338 k_ShiftExtend,
339 k_FPImm,
340 k_Barrier,
341 k_PSBHint,
342 k_BTIHint,
343 } Kind;
344
345 SMLoc StartLoc, EndLoc;
346
347 struct TokOp {
348 const char *Data;
349 unsigned Length;
350 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
351 };
352
353 // Separate shift/extend operand.
354 struct ShiftExtendOp {
355 AArch64_AM::ShiftExtendType Type;
356 unsigned Amount;
357 bool HasExplicitAmount;
358 };
359
360 struct RegOp {
361 unsigned RegNum;
362 RegKind Kind;
363 int ElementWidth;
364
365 // The register may be allowed as a different register class,
366 // e.g. for GPR64as32 or GPR32as64.
367 RegConstraintEqualityTy EqualityTy;
368
369 // In some cases the shift/extend needs to be explicitly parsed together
370 // with the register, rather than as a separate operand. This is needed
371 // for addressing modes where the instruction as a whole dictates the
372 // scaling/extend, rather than specific bits in the instruction.
373 // By parsing them as a single operand, we avoid the need to pass an
374 // extra operand in all CodeGen patterns (because all operands need to
375 // have an associated value), and we avoid the need to update TableGen to
376 // accept operands that have no associated bits in the instruction.
377 //
378 // An added benefit of parsing them together is that the assembler
379 // can give a sensible diagnostic if the scaling is not correct.
380 //
381 // The default is 'lsl #0' (HasExplicitAmount = false) if no
382 // ShiftExtend is specified.
383 ShiftExtendOp ShiftExtend;
384 };
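// Illustration of the 'lsl #0' default described above (a hypothetical
// constant, not code from this file): with no explicit shift/extend
// parsed, a register operand behaves as if it carried this value.
static constexpr ShiftExtendOp DefaultShiftExtendSketch = {
    AArch64_AM::LSL, /*Amount=*/0, /*HasExplicitAmount=*/false};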
385
386 struct MatrixRegOp {
387 unsigned RegNum;
388 unsigned ElementWidth;
389 MatrixKind Kind;
390 };
391
392 struct MatrixTileListOp {
393 unsigned RegMask = 0;
394 };
395
396 struct VectorListOp {
397 unsigned RegNum;
398 unsigned Count;
399 unsigned NumElements;
400 unsigned ElementWidth;
401 RegKind RegisterKind;
402 };
403
404 struct VectorIndexOp {
405 int Val;
406 };
407
408 struct ImmOp {
409 const MCExpr *Val;
410 };
411
412 struct ShiftedImmOp {
413 const MCExpr *Val;
414 unsigned ShiftAmount;
415 };
416
417 struct CondCodeOp {
418 AArch64CC::CondCode Code;
419 };
420
421 struct FPImmOp {
422 uint64_t Val; // APFloat value bitcasted to uint64_t.
423 bool IsExact; // describes whether parsed value was exact.
424 };
425
426 struct BarrierOp {
427 const char *Data;
428 unsigned Length;
429 unsigned Val; // Not the enum since not all values have names.
430 bool HasnXSModifier;
431 };
432
433 struct SysRegOp {
434 const char *Data;
435 unsigned Length;
436 uint32_t MRSReg;
437 uint32_t MSRReg;
438 uint32_t PStateField;
439 };
440
441 struct SysCRImmOp {
442 unsigned Val;
443 };
444
445 struct PrefetchOp {
446 const char *Data;
447 unsigned Length;
448 unsigned Val;
449 };
450
451 struct PSBHintOp {
452 const char *Data;
453 unsigned Length;
454 unsigned Val;
455 };
456
457 struct BTIHintOp {
458 const char *Data;
459 unsigned Length;
460 unsigned Val;
461 };
462
463 struct SVCROp {
464 const char *Data;
465 unsigned Length;
466 unsigned PStateField;
467 };
468
469 union {
470 struct TokOp Tok;
471 struct RegOp Reg;
472 struct MatrixRegOp MatrixReg;
473 struct MatrixTileListOp MatrixTileList;
474 struct VectorListOp VectorList;
475 struct VectorIndexOp VectorIndex;
476 struct ImmOp Imm;
477 struct ShiftedImmOp ShiftedImm;
478 struct CondCodeOp CondCode;
479 struct FPImmOp FPImm;
480 struct BarrierOp Barrier;
481 struct SysRegOp SysReg;
482 struct SysCRImmOp SysCRImm;
483 struct PrefetchOp Prefetch;
484 struct PSBHintOp PSBHint;
485 struct BTIHintOp BTIHint;
486 struct ShiftExtendOp ShiftExtend;
487 struct SVCROp SVCR;
488 };
489
490 // Keep the MCContext around as the MCExprs may need to be manipulated during
491 // the add<>Operands() calls.
492 MCContext &Ctx;
493
494public:
495 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
496
497 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
498 Kind = o.Kind;
499 StartLoc = o.StartLoc;
500 EndLoc = o.EndLoc;
501 switch (Kind) {
502 case k_Token:
503 Tok = o.Tok;
504 break;
505 case k_Immediate:
506 Imm = o.Imm;
507 break;
508 case k_ShiftedImm:
509 ShiftedImm = o.ShiftedImm;
510 break;
511 case k_CondCode:
512 CondCode = o.CondCode;
513 break;
514 case k_FPImm:
515 FPImm = o.FPImm;
516 break;
517 case k_Barrier:
518 Barrier = o.Barrier;
519 break;
520 case k_Register:
521 Reg = o.Reg;
522 break;
523 case k_MatrixRegister:
524 MatrixReg = o.MatrixReg;
525 break;
526 case k_MatrixTileList:
527 MatrixTileList = o.MatrixTileList;
528 break;
529 case k_VectorList:
530 VectorList = o.VectorList;
531 break;
532 case k_VectorIndex:
533 VectorIndex = o.VectorIndex;
534 break;
535 case k_SysReg:
536 SysReg = o.SysReg;
537 break;
538 case k_SysCR:
539 SysCRImm = o.SysCRImm;
540 break;
541 case k_Prefetch:
542 Prefetch = o.Prefetch;
543 break;
544 case k_PSBHint:
545 PSBHint = o.PSBHint;
546 break;
547 case k_BTIHint:
548 BTIHint = o.BTIHint;
549 break;
550 case k_ShiftExtend:
551 ShiftExtend = o.ShiftExtend;
552 break;
553 case k_SVCR:
554 SVCR = o.SVCR;
555 break;
556 }
557 }
558
559 /// getStartLoc - Get the location of the first token of this operand.
560 SMLoc getStartLoc() const override { return StartLoc; }
561 /// getEndLoc - Get the location of the last token of this operand.
562 SMLoc getEndLoc() const override { return EndLoc; }
563
564 StringRef getToken() const {
565 assert(Kind == k_Token && "Invalid access!");
566 return StringRef(Tok.Data, Tok.Length);
567 }
568
569 bool isTokenSuffix() const {
570 assert(Kind == k_Token && "Invalid access!");
571 return Tok.IsSuffix;
572 }
573
574 const MCExpr *getImm() const {
575 assert(Kind == k_Immediate && "Invalid access!");
576 return Imm.Val;
577 }
578
579 const MCExpr *getShiftedImmVal() const {
580 assert(Kind == k_ShiftedImm && "Invalid access!");
581 return ShiftedImm.Val;
582 }
583
584 unsigned getShiftedImmShift() const {
585 assert(Kind == k_ShiftedImm && "Invalid access!");
586 return ShiftedImm.ShiftAmount;
587 }
588
589 AArch64CC::CondCode getCondCode() const {
590 assert(Kind == k_CondCode && "Invalid access!");
591 return CondCode.Code;
592 }
593
594 APFloat getFPImm() const {
595 assert (Kind == k_FPImm && "Invalid access!");
596 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
597 }
598
599 bool getFPImmIsExact() const {
600 assert (Kind == k_FPImm && "Invalid access!");
601 return FPImm.IsExact;
602 }
603
604 unsigned getBarrier() const {
605 assert(Kind == k_Barrier && "Invalid access!");
606 return Barrier.Val;
607 }
608
609 StringRef getBarrierName() const {
610 assert(Kind == k_Barrier && "Invalid access!");
611 return StringRef(Barrier.Data, Barrier.Length);
612 }
613
614 bool getBarriernXSModifier() const {
615 assert(Kind == k_Barrier && "Invalid access!");
616 return Barrier.HasnXSModifier;
617 }
618
619 unsigned getReg() const override {
620 assert(Kind == k_Register && "Invalid access!");
621 return Reg.RegNum;
622 }
623
624 unsigned getMatrixReg() const {
625 assert(Kind == k_MatrixRegister && "Invalid access!");
626 return MatrixReg.RegNum;
627 }
628
629 unsigned getMatrixElementWidth() const {
630 assert(Kind == k_MatrixRegister && "Invalid access!");
631 return MatrixReg.ElementWidth;
632 }
633
634 MatrixKind getMatrixKind() const {
635 assert(Kind == k_MatrixRegister && "Invalid access!");
636 return MatrixReg.Kind;
637 }
638
639 unsigned getMatrixTileListRegMask() const {
640 assert(isMatrixTileList() && "Invalid access!");
641 return MatrixTileList.RegMask;
642 }
643
644 RegConstraintEqualityTy getRegEqualityTy() const {
645 assert(Kind == k_Register && "Invalid access!");
646 return Reg.EqualityTy;
647 }
648
649 unsigned getVectorListStart() const {
650 assert(Kind == k_VectorList && "Invalid access!");
651 return VectorList.RegNum;
652 }
653
654 unsigned getVectorListCount() const {
655 assert(Kind == k_VectorList && "Invalid access!");
656 return VectorList.Count;
657 }
658
659 int getVectorIndex() const {
660 assert(Kind == k_VectorIndex && "Invalid access!");
661 return VectorIndex.Val;
662 }
663
664 StringRef getSysReg() const {
665 assert(Kind == k_SysReg && "Invalid access!");
666 return StringRef(SysReg.Data, SysReg.Length);
667 }
668
669 unsigned getSysCR() const {
670 assert(Kind == k_SysCR && "Invalid access!");
671 return SysCRImm.Val;
672 }
673
674 unsigned getPrefetch() const {
675 assert(Kind == k_Prefetch && "Invalid access!");
676 return Prefetch.Val;
677 }
678
679 unsigned getPSBHint() const {
680 assert(Kind == k_PSBHint && "Invalid access!");
681 return PSBHint.Val;
682 }
683
684 StringRef getPSBHintName() const {
685 assert(Kind == k_PSBHint && "Invalid access!");
686 return StringRef(PSBHint.Data, PSBHint.Length);
687 }
688
689 unsigned getBTIHint() const {
690 assert(Kind == k_BTIHint && "Invalid access!");
691 return BTIHint.Val;
692 }
693
694 StringRef getBTIHintName() const {
695 assert(Kind == k_BTIHint && "Invalid access!");
696 return StringRef(BTIHint.Data, BTIHint.Length);
697 }
698
699 StringRef getSVCR() const {
700 assert(Kind == k_SVCR && "Invalid access!");
701 return StringRef(SVCR.Data, SVCR.Length);
702 }
703
704 StringRef getPrefetchName() const {
705 assert(Kind == k_Prefetch && "Invalid access!");
706 return StringRef(Prefetch.Data, Prefetch.Length);
707 }
708
709 AArch64_AM::ShiftExtendType getShiftExtendType() const {
710 if (Kind == k_ShiftExtend)
711 return ShiftExtend.Type;
712 if (Kind == k_Register)
713 return Reg.ShiftExtend.Type;
714 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 714)
;
715 }
716
717 unsigned getShiftExtendAmount() const {
718 if (Kind == k_ShiftExtend)
719 return ShiftExtend.Amount;
720 if (Kind == k_Register)
721 return Reg.ShiftExtend.Amount;
722 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 722)
;
723 }
724
725 bool hasShiftExtendAmount() const {
726 if (Kind == k_ShiftExtend)
727 return ShiftExtend.HasExplicitAmount;
728 if (Kind == k_Register)
729 return Reg.ShiftExtend.HasExplicitAmount;
730 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 730)
;
731 }
732
733 bool isImm() const override { return Kind == k_Immediate; }
734 bool isMem() const override { return false; }
735
736 bool isUImm6() const {
737 if (!isImm())
738 return false;
739 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
740 if (!MCE)
741 return false;
742 int64_t Val = MCE->getValue();
743 return (Val >= 0 && Val < 64);
744 }
745
746 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
747
748 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
749 return isImmScaled<Bits, Scale>(true);
750 }
751
752 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
753 return isImmScaled<Bits, Scale>(false);
754 }
755
756 template <int Bits, int Scale>
757 DiagnosticPredicate isImmScaled(bool Signed) const {
758 if (!isImm())
759 return DiagnosticPredicateTy::NoMatch;
760
761 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
762 if (!MCE)
763 return DiagnosticPredicateTy::NoMatch;
764
765 int64_t MinVal, MaxVal;
766 if (Signed) {
767 int64_t Shift = Bits - 1;
768 MinVal = (int64_t(1) << Shift) * -Scale;
769 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
770 } else {
771 MinVal = 0;
772 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
773 }
774
775 int64_t Val = MCE->getValue();
776 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
777 return DiagnosticPredicateTy::Match;
778
779 return DiagnosticPredicateTy::NearMatch;
780 }
781
782 DiagnosticPredicate isSVEPattern() const {
783 if (!isImm())
784 return DiagnosticPredicateTy::NoMatch;
785 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
786 if (!MCE)
787 return DiagnosticPredicateTy::NoMatch;
788 int64_t Val = MCE->getValue();
789 if (Val >= 0 && Val < 32)
790 return DiagnosticPredicateTy::Match;
791 return DiagnosticPredicateTy::NearMatch;
792 }
793
794 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
795 AArch64MCExpr::VariantKind ELFRefKind;
796 MCSymbolRefExpr::VariantKind DarwinRefKind;
797 int64_t Addend;
798 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
799 Addend)) {
800 // If we don't understand the expression, assume the best and
801 // let the fixup and relocation code deal with it.
802 return true;
803 }
804
805 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
806 ELFRefKind == AArch64MCExpr::VK_LO12 ||
807 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
808 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
809 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
810 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
811 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
812 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
813 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
814 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
815 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
816 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
817 // Note that we don't range-check the addend. It's adjusted modulo page
818 // size when converted, so there is no "out of range" condition when using
819 // @pageoff.
820 return true;
821 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
822 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
823 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
824 return Addend == 0;
825 }
826
827 return false;
828 }
829
830 template <int Scale> bool isUImm12Offset() const {
831 if (!isImm())
832 return false;
833
834 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
835 if (!MCE)
836 return isSymbolicUImm12Offset(getImm());
837
838 int64_t Val = MCE->getValue();
839 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
840 }
841
842 template <int N, int M>
843 bool isImmInRange() const {
844 if (!isImm())
845 return false;
846 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
847 if (!MCE)
848 return false;
849 int64_t Val = MCE->getValue();
850 return (Val >= N && Val <= M);
851 }
852
853 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
854 // a logical immediate can always be represented when inverted.
855 template <typename T>
856 bool isLogicalImm() const {
857 if (!isImm())
858 return false;
859 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
860 if (!MCE)
861 return false;
862
863 int64_t Val = MCE->getValue();
864 // Avoid left shift by 64 directly.
865 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
866 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
867 if ((Val & Upper) && (Val & Upper) != Upper)
868 return false;
869
870 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
871 }
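// Hypothetical worked example of the 'Upper' mask above (illustration only,
// not part of the original file): shifting twice by sizeof(T)*4 bits avoids
// a single shift by 64, which would be undefined behaviour for 64-bit T.
template <typename T> static uint64_t upperMaskSketch() {
  // T = int32_t: two 16-bit shifts give 0xFFFFFFFF00000000.
  // T = int64_t: two 32-bit shifts give 0, so no bits are masked off.
  return UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
}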
872
873 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
874
875 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
876 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
877 /// immediate that can be shifted by 'Shift'.
878 template <unsigned Width>
879 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
880 if (isShiftedImm() && Width == getShiftedImmShift())
881 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
882 return std::make_pair(CE->getValue(), Width);
883
884 if (isImm())
885 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
886 int64_t Val = CE->getValue();
887 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
888 return std::make_pair(Val >> Width, Width);
889 else
890 return std::make_pair(Val, 0u);
891 }
892
893 return {};
894 }
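// Standalone sketch of the decomposition above for Width = 12 (a hypothetical
// helper on plain integers instead of MCExpr; the real callers use
// getShiftedVal<12>(), e.g. isAddSubImm() below).
static std::pair<int64_t, unsigned> shiftedVal12Sketch(int64_t Val) {
  if (Val != 0 && (uint64_t(Val >> 12) << 12) == uint64_t(Val))
    return {Val >> 12, 12u}; // e.g. 0x3000 -> (3, shift 12)
  return {Val, 0u};          // e.g. 5 -> (5, shift 0)
}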
895
896 bool isAddSubImm() const {
897 if (!isShiftedImm() && !isImm())
898 return false;
899
900 const MCExpr *Expr;
901
902 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
903 if (isShiftedImm()) {
904 unsigned Shift = ShiftedImm.ShiftAmount;
905 Expr = ShiftedImm.Val;
906 if (Shift != 0 && Shift != 12)
907 return false;
908 } else {
909 Expr = getImm();
910 }
911
912 AArch64MCExpr::VariantKind ELFRefKind;
913 MCSymbolRefExpr::VariantKind DarwinRefKind;
914 int64_t Addend;
915 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
916 DarwinRefKind, Addend)) {
917 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
918 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
919 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
920 || ELFRefKind == AArch64MCExpr::VK_LO12
921 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
922 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
923 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
924 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
925 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
926 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
927 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
928 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
929 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
930 }
931
932 // If it's a constant, it should be a real immediate in range.
933 if (auto ShiftedVal = getShiftedVal<12>())
934 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
935
936 // If it's an expression, we hope for the best and let the fixup/relocation
937 // code deal with it.
938 return true;
939 }
940
941 bool isAddSubImmNeg() const {
942 if (!isShiftedImm() && !isImm())
943 return false;
944
945 // Otherwise it should be a real negative immediate in range.
946 if (auto ShiftedVal = getShiftedVal<12>())
947 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
948
949 return false;
950 }
951
952 // Signed value in the range -128 to +127. For element widths of
953 // 16 bits or higher it may also be a signed multiple of 256 in the
954 // range -32768 to +32512.
955 // For element-width of 8 bits a range of -128 to 255 is accepted,
956 // since a copy of a byte can be either signed/unsigned.
957 template <typename T>
958 DiagnosticPredicate isSVECpyImm() const {
959 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
960 return DiagnosticPredicateTy::NoMatch;
961
962 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
963 std::is_same<int8_t, T>::value;
964 if (auto ShiftedImm = getShiftedVal<8>())
965 if (!(IsByte && ShiftedImm->second) &&
966 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
967 << ShiftedImm->second))
968 return DiagnosticPredicateTy::Match;
969
970 return DiagnosticPredicateTy::NearMatch;
971 }
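// Hypothetical standalone check mirroring the documented 16-bit-element
// range above (illustration only; the real path goes through
// getShiftedVal<8>() and AArch64_AM::isSVECpyImm).
static bool isSVECpyImm16Sketch(int64_t V) {
  if (V >= -128 && V <= 127) // plain signed byte
    return true;
  // Otherwise: a signed multiple of 256 in the range -32768 to +32512.
  return (V % 256) == 0 && V >= -32768 && V <= 32512;
}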
972
973 // Unsigned value in the range 0 to 255. For element widths of
974 // 16 bits or higher it may also be a signed multiple of 256 in the
975 // range 0 to 65280.
976 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
977 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
978 return DiagnosticPredicateTy::NoMatch;
979
980 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
981 std::is_same<int8_t, T>::value;
982 if (auto ShiftedImm = getShiftedVal<8>())
983 if (!(IsByte && ShiftedImm->second) &&
984 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
985 << ShiftedImm->second))
986 return DiagnosticPredicateTy::Match;
987
988 return DiagnosticPredicateTy::NearMatch;
989 }
990
991 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
992 if (isLogicalImm<T>() && !isSVECpyImm<T>())
993 return DiagnosticPredicateTy::Match;
994 return DiagnosticPredicateTy::NoMatch;
995 }
996
997 bool isCondCode() const { return Kind == k_CondCode; }
998
999 bool isSIMDImmType10() const {
1000 if (!isImm())
1001 return false;
1002 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1003 if (!MCE)
1004 return false;
1005 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1006 }
1007
1008 template<int N>
1009 bool isBranchTarget() const {
1010 if (!isImm())
1011 return false;
1012 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1013 if (!MCE)
1014 return true;
1015 int64_t Val = MCE->getValue();
1016 if (Val & 0x3)
1017 return false;
1018 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1019 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1020 }
1021
1022 bool
1023 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1024 if (!isImm())
1025 return false;
1026
1027 AArch64MCExpr::VariantKind ELFRefKind;
1028 MCSymbolRefExpr::VariantKind DarwinRefKind;
1029 int64_t Addend;
1030 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1031 DarwinRefKind, Addend)) {
1032 return false;
1033 }
1034 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1035 return false;
1036
1037 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1038 }
1039
1040 bool isMovWSymbolG3() const {
1041 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1042 }
1043
1044 bool isMovWSymbolG2() const {
1045 return isMovWSymbol(
1046 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1047 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1048 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1049 AArch64MCExpr::VK_DTPREL_G2});
1050 }
1051
1052 bool isMovWSymbolG1() const {
1053 return isMovWSymbol(
1054 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1055 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1056 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1057 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1058 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1059 }
1060
1061 bool isMovWSymbolG0() const {
1062 return isMovWSymbol(
1063 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1064 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1065 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1066 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1067 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1068 }
1069
1070 template<int RegWidth, int Shift>
1071 bool isMOVZMovAlias() const {
1072 if (!isImm()) return false;
1073
1074 const MCExpr *E = getImm();
1075 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1076 uint64_t Value = CE->getValue();
1077
1078 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1079 }
1080 // Only supports the case of Shift being 0 if an expression is used as an
1081 // operand
1082 return !Shift && E;
1083 }
1084
1085 template<int RegWidth, int Shift>
1086 bool isMOVNMovAlias() const {
1087 if (!isImm()) return false;
1088
1089 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1090 if (!CE) return false;
1091 uint64_t Value = CE->getValue();
1092
1093 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1094 }
1095
1096 bool isFPImm() const {
1097 return Kind == k_FPImm &&
1098 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1099 }
1100
1101 bool isBarrier() const {
1102 return Kind == k_Barrier && !getBarriernXSModifier();
1103 }
1104 bool isBarriernXS() const {
1105 return Kind == k_Barrier && getBarriernXSModifier();
1106 }
1107 bool isSysReg() const { return Kind == k_SysReg; }
1108
1109 bool isMRSSystemRegister() const {
1110 if (!isSysReg()) return false;
1111
1112 return SysReg.MRSReg != -1U;
1113 }
1114
1115 bool isMSRSystemRegister() const {
1116 if (!isSysReg()) return false;
1117 return SysReg.MSRReg != -1U;
1118 }
1119
1120 bool isSystemPStateFieldWithImm0_1() const {
1121 if (!isSysReg()) return false;
1122 return (SysReg.PStateField == AArch64PState::PAN ||
1123 SysReg.PStateField == AArch64PState::DIT ||
1124 SysReg.PStateField == AArch64PState::UAO ||
1125 SysReg.PStateField == AArch64PState::SSBS);
1126 }
1127
1128 bool isSystemPStateFieldWithImm0_15() const {
1129 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1130 return SysReg.PStateField != -1U;
1131 }
1132
1133 bool isSVCR() const {
1134 if (Kind != k_SVCR)
1135 return false;
1136 return SVCR.PStateField != -1U;
1137 }
1138
1139 bool isReg() const override {
1140 return Kind == k_Register;
1141 }
1142
1143 bool isScalarReg() const {
1144 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1145 }
1146
1147 bool isNeonVectorReg() const {
1148 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1149 }
1150
1151 bool isNeonVectorRegLo() const {
1152 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1153 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1154 Reg.RegNum) ||
1155 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1156 Reg.RegNum));
1157 }
1158
1159 bool isMatrix() const { return Kind == k_MatrixRegister; }
1160 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1161
1162 template <unsigned Class> bool isSVEVectorReg() const {
1163 RegKind RK;
1164 switch (Class) {
1165 case AArch64::ZPRRegClassID:
1166 case AArch64::ZPR_3bRegClassID:
1167 case AArch64::ZPR_4bRegClassID:
1168 RK = RegKind::SVEDataVector;
1169 break;
1170 case AArch64::PPRRegClassID:
1171 case AArch64::PPR_3bRegClassID:
1172 RK = RegKind::SVEPredicateVector;
1173 break;
1174 default:
1175 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1175
)
;
1176 }
1177
1178 return (Kind == k_Register && Reg.Kind == RK) &&
1179 AArch64MCRegisterClasses[Class].contains(getReg());
1180 }
1181
1182 template <unsigned Class> bool isFPRasZPR() const {
1183 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1184 AArch64MCRegisterClasses[Class].contains(getReg());
1185 }
1186
1187 template <int ElementWidth, unsigned Class>
1188 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1189 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1190 return DiagnosticPredicateTy::NoMatch;
1191
1192 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1193 return DiagnosticPredicateTy::Match;
1194
1195 return DiagnosticPredicateTy::NearMatch;
1196 }
1197
1198 template <int ElementWidth, unsigned Class>
1199 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1200 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1201 return DiagnosticPredicateTy::NoMatch;
1202
1203 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1204 return DiagnosticPredicateTy::Match;
1205
1206 return DiagnosticPredicateTy::NearMatch;
1207 }
1208
1209 template <int ElementWidth, unsigned Class,
1210 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1211 bool ShiftWidthAlwaysSame>
1212 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1213 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1214 if (!VectorMatch.isMatch())
1215 return DiagnosticPredicateTy::NoMatch;
1216
1217 // Give a more specific diagnostic when the user has explicitly typed in
1218 // a shift-amount that does not match what is expected, but for which
1219 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1220 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1221 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1222 ShiftExtendTy == AArch64_AM::SXTW) &&
1223 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1224 return DiagnosticPredicateTy::NoMatch;
1225
1226 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1227 return DiagnosticPredicateTy::Match;
1228
1229 return DiagnosticPredicateTy::NearMatch;
1230 }
1231
1232 bool isGPR32as64() const {
1233 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1234 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1235 }
1236
1237 bool isGPR64as32() const {
1238 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1239 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1240 }
1241
1242 bool isGPR64x8() const {
1243 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1244 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1245 Reg.RegNum);
1246 }
1247
1248 bool isWSeqPair() const {
1249 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1250 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1251 Reg.RegNum);
1252 }
1253
1254 bool isXSeqPair() const {
1255 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1256 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1257 Reg.RegNum);
1258 }
1259
1260 template<int64_t Angle, int64_t Remainder>
1261 DiagnosticPredicate isComplexRotation() const {
1262 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1263
1264 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1265 if (!CE) return DiagnosticPredicateTy::NoMatch;
1266 uint64_t Value = CE->getValue();
1267
1268 if (Value % Angle == Remainder && Value <= 270)
1269 return DiagnosticPredicateTy::Match;
1270 return DiagnosticPredicateTy::NearMatch;
1271 }
1272
1273 template <unsigned RegClassID> bool isGPR64() const {
1274 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1275 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1276 }
1277
1278 template <unsigned RegClassID, int ExtWidth>
1279 DiagnosticPredicate isGPR64WithShiftExtend() const {
1280 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1281 return DiagnosticPredicateTy::NoMatch;
1282
1283 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1284 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1285 return DiagnosticPredicateTy::Match;
1286 return DiagnosticPredicateTy::NearMatch;
1287 }
1288
1289 /// Is this a vector list with the type implicit (presumably attached to the
1290 /// instruction itself)?
1291 template <RegKind VectorKind, unsigned NumRegs>
1292 bool isImplicitlyTypedVectorList() const {
1293 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1294 VectorList.NumElements == 0 &&
1295 VectorList.RegisterKind == VectorKind;
1296 }
1297
1298 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1299 unsigned ElementWidth>
1300 bool isTypedVectorList() const {
1301 if (Kind != k_VectorList)
1302 return false;
1303 if (VectorList.Count != NumRegs)
1304 return false;
1305 if (VectorList.RegisterKind != VectorKind)
1306 return false;
1307 if (VectorList.ElementWidth != ElementWidth)
1308 return false;
1309 return VectorList.NumElements == NumElements;
1310 }
1311
1312 template <int Min, int Max>
1313 DiagnosticPredicate isVectorIndex() const {
1314 if (Kind != k_VectorIndex)
1315 return DiagnosticPredicateTy::NoMatch;
1316 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1317 return DiagnosticPredicateTy::Match;
1318 return DiagnosticPredicateTy::NearMatch;
1319 }
1320
1321 bool isToken() const override { return Kind == k_Token; }
1322
1323 bool isTokenEqual(StringRef Str) const {
1324 return Kind == k_Token && getToken() == Str;
1325 }
1326 bool isSysCR() const { return Kind == k_SysCR; }
1327 bool isPrefetch() const { return Kind == k_Prefetch; }
1328 bool isPSBHint() const { return Kind == k_PSBHint; }
1329 bool isBTIHint() const { return Kind == k_BTIHint; }
1330 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1331 bool isShifter() const {
1332 if (!isShiftExtend())
1333 return false;
1334
1335 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1336 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1337 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1338 ST == AArch64_AM::MSL);
1339 }
1340
1341 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1342 if (Kind != k_FPImm)
1343 return DiagnosticPredicateTy::NoMatch;
1344
1345 if (getFPImmIsExact()) {
1346 // Look up the immediate in the table of supported immediates.
1347 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1348 assert(Desc && "Unknown enum value");
1349
1350 // Calculate its FP value.
1351 APFloat RealVal(APFloat::IEEEdouble());
1352 auto StatusOrErr =
1353 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1354 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1355 llvm_unreachable("FP immediate is not exact");
1356
1357 if (getFPImm().bitwiseIsEqual(RealVal))
1358 return DiagnosticPredicateTy::Match;
1359 }
1360
1361 return DiagnosticPredicateTy::NearMatch;
1362 }
1363
1364 template <unsigned ImmA, unsigned ImmB>
1365 DiagnosticPredicate isExactFPImm() const {
1366 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1367 if ((Res = isExactFPImm<ImmA>()))
1368 return DiagnosticPredicateTy::Match;
1369 if ((Res = isExactFPImm<ImmB>()))
1370 return DiagnosticPredicateTy::Match;
1371 return Res;
1372 }
1373
1374 bool isExtend() const {
1375 if (!isShiftExtend())
1376 return false;
1377
1378 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1379 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1380 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1381 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1382 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1383 ET == AArch64_AM::LSL) &&
1384 getShiftExtendAmount() <= 4;
1385 }
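// Example (annotation): "add x0, x1, w2, uxtw #2" parses as a UXTW
// extend with amount 2 and is accepted; amounts above 4 are rejected.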
1386
1387 bool isExtend64() const {
1388 if (!isExtend())
1389 return false;
1390 // Make sure the extend expects a 32-bit source register.
1391 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1392 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1393 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1394 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1395 }
1396
1397 bool isExtendLSL64() const {
1398 if (!isExtend())
1399 return false;
1400 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1401 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1402 ET == AArch64_AM::LSL) &&
1403 getShiftExtendAmount() <= 4;
1404 }
1405
1406 template<int Width> bool isMemXExtend() const {
1407 if (!isExtend())
1408 return false;
1409 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1410 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1411 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1412 getShiftExtendAmount() == 0);
1413 }
1414
1415 template<int Width> bool isMemWExtend() const {
1416 if (!isExtend())
1417 return false;
1418 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1419 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1420 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1421 getShiftExtendAmount() == 0);
1422 }
1423
1424 template <unsigned width>
1425 bool isArithmeticShifter() const {
1426 if (!isShifter())
1427 return false;
1428
1429 // An arithmetic shifter is LSL, LSR, or ASR.
1430 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1431 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1432 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1433 }
1434
1435 template <unsigned width>
1436 bool isLogicalShifter() const {
1437 if (!isShifter())
1438 return false;
1439
1440 // A logical shifter is LSL, LSR, ASR or ROR.
1441 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1442 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1443 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1444 getShiftExtendAmount() < width;
1445 }
1446
1447 bool isMovImm32Shifter() const {
1448 if (!isShifter())
1449 return false;
1450
1451 // A 32-bit MOVi shifter is LSL of 0 or 16.
1452 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1453 if (ST != AArch64_AM::LSL)
1454 return false;
1455 uint64_t Val = getShiftExtendAmount();
1456 return (Val == 0 || Val == 16);
1457 }
1458
1459 bool isMovImm64Shifter() const {
1460 if (!isShifter())
1461 return false;
1462
1463 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1464 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1465 if (ST != AArch64_AM::LSL)
1466 return false;
1467 uint64_t Val = getShiftExtendAmount();
1468 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1469 }
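// Example (annotation): "movz w0, #1, lsl #16" needs the 32-bit variant,
// while "movz x0, #1, lsl #48" is only valid as a 64-bit MOVi shifter.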
1470
1471 bool isLogicalVecShifter() const {
1472 if (!isShifter())
1473 return false;
1474
1475 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1476 unsigned Shift = getShiftExtendAmount();
1477 return getShiftExtendType() == AArch64_AM::LSL &&
1478 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1479 }
1480
1481 bool isLogicalVecHalfWordShifter() const {
1482 if (!isLogicalVecShifter())
1483 return false;
1484
1485 // A logical vector half-word shifter is a left shift by 0 or 8.
1486 unsigned Shift = getShiftExtendAmount();
1487 return getShiftExtendType() == AArch64_AM::LSL &&
1488 (Shift == 0 || Shift == 8);
1489 }
1490
1491 bool isMoveVecShifter() const {
1492 if (!isShiftExtend())
1493 return false;
1494
1495 // A move vector shifter is an MSL shift by 8 or 16.
1496 unsigned Shift = getShiftExtendAmount();
1497 return getShiftExtendType() == AArch64_AM::MSL &&
1498 (Shift == 8 || Shift == 16);
1499 }
1500
1501 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1502 // to LDUR/STUR when the offset is not legal for the former but is for
1503 // the latter. As such, in addition to checking for being a legal unscaled
1504 // address, also check that it is not a legal scaled address. This avoids
1505 // ambiguity in the matcher.
1506 template<int Width>
1507 bool isSImm9OffsetFB() const {
1508 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1509 }
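// Example (annotation): for a 64-bit LDR, offset #1 is not a multiple of
// 8 (so not a legal scaled uimm12) but fits simm9, so "ldr x0, [x1, #1]"
// takes the fallback and is encoded as LDUR.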
1510
1511 bool isAdrpLabel() const {
1512 // Validation was handled during parsing, so we just verify that
1513 // something didn't go haywire.
1514 if (!isImm())
1515 return false;
1516
1517 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1518 int64_t Val = CE->getValue();
1519 int64_t Min = - (4096 * (1LL << (21 - 1)));
1520 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1521 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1522 }
1523
1524 return true;
1525 }
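// Annotation: 21 immediate bits scaled by the 4 KiB page size give ADRP a
// reach of -4 GiB to +4 GiB - 4096, page-aligned; e.g. 8192 passes while
// 8191 fails the (Val % 4096) == 0 check.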
1526
1527 bool isAdrLabel() const {
1528 // Validation was handled during parsing, so we just verify that
1529 // something didn't go haywire.
1530 if (!isImm())
1531 return false;
1532
1533 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1534 int64_t Val = CE->getValue();
1535 int64_t Min = - (1LL << (21 - 1));
1536 int64_t Max = ((1LL << (21 - 1)) - 1);
1537 return Val >= Min && Val <= Max;
1538 }
1539
1540 return true;
1541 }
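// Annotation: ADR keeps the raw 21-bit signed range, i.e. byte-granular
// offsets in [-1048576, 1048575] (about +/-1 MiB).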
1542
1543 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1544 DiagnosticPredicate isMatrixRegOperand() const {
1545 if (!isMatrix())
1546 return DiagnosticPredicateTy::NoMatch;
1547 if (getMatrixKind() != Kind ||
1548 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1549 EltSize != getMatrixElementWidth())
1550 return DiagnosticPredicateTy::NearMatch;
1551 return DiagnosticPredicateTy::Match;
1552 }
1553
1554 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1555 // Add as immediates when possible. Null MCExpr = 0.
1556 if (!Expr)
1557 Inst.addOperand(MCOperand::createImm(0));
1558 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1559 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1560 else
1561 Inst.addOperand(MCOperand::createExpr(Expr));
1562 }
1563
1564 void addRegOperands(MCInst &Inst, unsigned N) const {
1565 assert(N == 1 && "Invalid number of operands!");
1566 Inst.addOperand(MCOperand::createReg(getReg()));
1567 }
1568
1569 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1570 assert(N == 1 && "Invalid number of operands!");
1571 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1572 }
1573
1574 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1575 assert(N == 1 && "Invalid number of operands!");
1576 assert(
1577 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1578
1579 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1580 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1581 RI->getEncodingValue(getReg()));
1582
1583 Inst.addOperand(MCOperand::createReg(Reg));
1584 }
1585
1586 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1587 assert(N == 1 && "Invalid number of operands!");
1588 assert(
1589 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1590
1591 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1592 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1593 RI->getEncodingValue(getReg()));
1594
1595 Inst.addOperand(MCOperand::createReg(Reg));
1596 }
1597
1598 template <int Width>
1599 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1600 unsigned Base;
1601 switch (Width) {
1602 case 8: Base = AArch64::B0; break;
1603 case 16: Base = AArch64::H0; break;
1604 case 32: Base = AArch64::S0; break;
1605 case 64: Base = AArch64::D0; break;
1606 case 128: Base = AArch64::Q0; break;
1607 default:
1608 llvm_unreachable("Unsupported width");
1609 }
1610 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1611 }
1612
1613 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1614 assert(N == 1 && "Invalid number of operands!");
1615 assert(
1616 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1617 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1618 }
1619
1620 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1621 assert(N == 1 && "Invalid number of operands!");
1622 assert(
1623 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1624 Inst.addOperand(MCOperand::createReg(getReg()));
1625 }
1626
1627 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1628 assert(N == 1 && "Invalid number of operands!");
1629 Inst.addOperand(MCOperand::createReg(getReg()));
1630 }
1631
1632 enum VecListIndexType {
1633 VecListIdx_DReg = 0,
1634 VecListIdx_QReg = 1,
1635 VecListIdx_ZReg = 2,
1636 };
1637
1638 template <VecListIndexType RegTy, unsigned NumRegs>
1639 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1640 assert(N == 1 && "Invalid number of operands!");
1641 static const unsigned FirstRegs[][5] = {
1642 /* DReg */ { AArch64::Q0,
1643 AArch64::D0, AArch64::D0_D1,
1644 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1645 /* QReg */ { AArch64::Q0,
1646 AArch64::Q0, AArch64::Q0_Q1,
1647 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1648 /* ZReg */ { AArch64::Z0,
1649 AArch64::Z0, AArch64::Z0_Z1,
1650 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1651 };
1652
1653 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1654 " NumRegs must be <= 4 for ZRegs");
1655
1656 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1657 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1658 FirstRegs[(unsigned)RegTy][0]));
1659 }
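// Annotation: NEON vector lists store their start register as a Q
// register, so FirstRegs[RegTy][0] is the base to subtract; e.g. a
// two-register D list starting at v5 yields D0_D1 + (Q5 - Q0), i.e. the
// D5_D6 tuple register, relying on consecutive tuple numbering.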
1660
1661 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1662 assert(N == 1 && "Invalid number of operands!");
1663 unsigned RegMask = getMatrixTileListRegMask();
1664 assert(RegMask <= 0xFF && "Invalid mask!");
1665 Inst.addOperand(MCOperand::createImm(RegMask));
1666 }
1667
1668 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1669 assert(N == 1 && "Invalid number of operands!");
1670 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1671 }
1672
1673 template <unsigned ImmIs0, unsigned ImmIs1>
1674 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1675 assert(N == 1 && "Invalid number of operands!");
1676 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1677 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1678 }
1679
1680 void addImmOperands(MCInst &Inst, unsigned N) const {
1681 assert(N == 1 && "Invalid number of operands!");
1682 // If this is a pageoff symrefexpr with an addend, adjust the addend
1683 // to be only the page-offset portion. Otherwise, just add the expr
1684 // as-is.
1685 addExpr(Inst, getImm());
1686 }
1687
1688 template <int Shift>
1689 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1690 assert(N == 2 && "Invalid number of operands!");
1691 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1692 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1693 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1694 } else if (isShiftedImm()) {
1695 addExpr(Inst, getShiftedImmVal());
1696 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1697 } else {
1698 addExpr(Inst, getImm());
1699 Inst.addOperand(MCOperand::createImm(0));
1700 }
1701 }
1702
1703 template <int Shift>
1704 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1705 assert(N == 2 && "Invalid number of operands!");
1706 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1707 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1708 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1709 } else
1710 llvm_unreachable("Not a shifted negative immediate");
1711 }
1712
1713 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1714 assert(N == 1 && "Invalid number of operands!");
1715 Inst.addOperand(MCOperand::createImm(getCondCode()));
1716 }
1717
1718 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1719 assert(N == 1 && "Invalid number of operands!");
1720 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1721 if (!MCE)
1722 addExpr(Inst, getImm());
1723 else
1724 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1725 }
1726
1727 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1728 addImmOperands(Inst, N);
1729 }
1730
1731 template<int Scale>
1732 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1733 assert(N == 1 && "Invalid number of operands!");
1734 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1735
1736 if (!MCE) {
1737 Inst.addOperand(MCOperand::createExpr(getImm()));
1738 return;
1739 }
1740 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1741 }
1742
1743 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1744 assert(N == 1 && "Invalid number of operands!");
1745 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1746 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1747 }
1748
1749 template <int Scale>
1750 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1751 assert(N == 1 && "Invalid number of operands!");
1752 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1753 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1754 }
1755
1756 template <typename T>
1757 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1758 assert(N == 1 && "Invalid number of operands!");
1759 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1760 std::make_unsigned_t<T> Val = MCE->getValue();
1761 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1762 Inst.addOperand(MCOperand::createImm(encoding));
1763 }
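// Example (annotation): for a 64-bit operation, 0x00FF00FF00FF00FF is a
// valid bitmask immediate (a repeating 16-bit element with a contiguous
// run of ones) and is packed into the 13-bit N:immr:imms encoding.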
1764
1765 template <typename T>
1766 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1767 assert(N == 1 && "Invalid number of operands!");
1768 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1769 std::make_unsigned_t<T> Val = ~MCE->getValue();
1770 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1771 Inst.addOperand(MCOperand::createImm(encoding));
1772 }
1773
1774 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1775 assert(N == 1 && "Invalid number of operands!");
1776 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1777 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1778 Inst.addOperand(MCOperand::createImm(encoding));
1779 }
1780
1781 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1782 // Branch operands don't encode the low bits, so shift them off
1783 // here. If it's a label, however, just put it on directly as there's
1784 // not enough information now to do anything.
1785 assert(N == 1 && "Invalid number of operands!");
1786 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1787 if (!MCE) {
1788 addExpr(Inst, getImm());
1789 return;
1790 }
1791 assert(MCE && "Invalid constant immediate operand!");
1792 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1793 }
1794
1795 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1796 // Branch operands don't encode the low bits, so shift them off
1797 // here. If it's a label, however, just put it on directly as there's
1798 // not enough information now to do anything.
1799 assert(N == 1 && "Invalid number of operands!");
1800 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1801 if (!MCE) {
1802 addExpr(Inst, getImm());
1803 return;
1804 }
1805 assert(MCE && "Invalid constant immediate operand!");
1806 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1807 }
1808
1809 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1810 // Branch operands don't encode the low bits, so shift them off
1811 // here. If it's a label, however, just put it on directly as there's
1812 // not enough information now to do anything.
1813 assert(N == 1 && "Invalid number of operands!");
1814 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1815 if (!MCE) {
1816 addExpr(Inst, getImm());
1817 return;
1818 }
1819 assert(MCE && "Invalid constant immediate operand!");
1820 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1821 }
1822
1823 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1824 assert(N == 1 && "Invalid number of operands!");
1825 Inst.addOperand(MCOperand::createImm(
1826 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1827 }
1828
1829 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1830 assert(N == 1 && "Invalid number of operands!");
1831 Inst.addOperand(MCOperand::createImm(getBarrier()));
1832 }
1833
1834 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1835 assert(N == 1 && "Invalid number of operands!");
1836 Inst.addOperand(MCOperand::createImm(getBarrier()));
1837 }
1838
1839 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1840 assert(N == 1 && "Invalid number of operands!");
1841
1842 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1843 }
1844
1845 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1846 assert(N == 1 && "Invalid number of operands!");
1847
1848 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1849 }
1850
1851 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1852 assert(N == 1 && "Invalid number of operands!");
1853
1854 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1855 }
1856
1857 void addSVCROperands(MCInst &Inst, unsigned N) const {
1858 assert(N == 1 && "Invalid number of operands!");
1859
1860 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1861 }
1862
1863 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1864 assert(N == 1 && "Invalid number of operands!");
1865
1866 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1867 }
1868
1869 void addSysCROperands(MCInst &Inst, unsigned N) const {
1870 assert(N == 1 && "Invalid number of operands!");
1871 Inst.addOperand(MCOperand::createImm(getSysCR()));
1872 }
1873
1874 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!");
1876 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1877 }
1878
1879 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1880 assert(N == 1 && "Invalid number of operands!");
1881 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1882 }
1883
1884 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1885 assert(N == 1 && "Invalid number of operands!");
1886 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1887 }
1888
1889 void addShifterOperands(MCInst &Inst, unsigned N) const {
1890 assert(N == 1 && "Invalid number of operands!");
1891 unsigned Imm =
1892 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1893 Inst.addOperand(MCOperand::createImm(Imm));
1894 }
1895
1896 void addExtendOperands(MCInst &Inst, unsigned N) const {
1897 assert(N == 1 && "Invalid number of operands!");
1898 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1899 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1900 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1901 Inst.addOperand(MCOperand::createImm(Imm));
1902 }
1903
1904 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1905 assert(N == 1 && "Invalid number of operands!");
1906 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1907 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1908 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1909 Inst.addOperand(MCOperand::createImm(Imm));
1910 }
1911
1912 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1913 assert(N == 2 && "Invalid number of operands!");
1914 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1915 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1916 Inst.addOperand(MCOperand::createImm(IsSigned));
1917 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1918 }
1919
1920 // For 8-bit load/store instructions with a register offset, both the
1921 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1922 // they're disambiguated by whether the shift was explicit or implicit rather
1923 // than its size.
1924 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1925 assert(N == 2 && "Invalid number of operands!");
1926 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1927 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1928 Inst.addOperand(MCOperand::createImm(IsSigned));
1929 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1930 }
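// Example (annotation): "ldrb w0, [x1, x2]" has an implicit shift of 0
// (the "NoShift" form), while "ldrb w0, [x1, x2, lsl #0]" is explicit
// (the "DoShift" form); hasShiftExtendAmount() tells them apart.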
1931
1932 template<int Shift>
1933 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1934 assert(N == 1 && "Invalid number of operands!");
1935
1936 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1937 if (CE) {
1938 uint64_t Value = CE->getValue();
1939 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1940 } else {
1941 addExpr(Inst, getImm());
1942 }
1943 }
1944
1945 template<int Shift>
1946 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1947 assert(N == 1 && "Invalid number of operands!");
1948
1949 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1950 uint64_t Value = CE->getValue();
1951 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1952 }
1953
1954 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1955 assert(N == 1 && "Invalid number of operands!");
1956 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1957 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1958 }
1959
1960 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1961 assert(N == 1 && "Invalid number of operands!");
1962 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1963 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1964 }
1965
1966 void print(raw_ostream &OS) const override;
1967
1968 static std::unique_ptr<AArch64Operand>
1969 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1970 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1971 Op->Tok.Data = Str.data();
1972 Op->Tok.Length = Str.size();
1973 Op->Tok.IsSuffix = IsSuffix;
1974 Op->StartLoc = S;
1975 Op->EndLoc = S;
1976 return Op;
1977 }
1978
1979 static std::unique_ptr<AArch64Operand>
1980 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1981 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1982 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1983 unsigned ShiftAmount = 0,
1984 unsigned HasExplicitAmount = false) {
1985 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1986 Op->Reg.RegNum = RegNum;
1987 Op->Reg.Kind = Kind;
1988 Op->Reg.ElementWidth = 0;
1989 Op->Reg.EqualityTy = EqTy;
1990 Op->Reg.ShiftExtend.Type = ExtTy;
1991 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1992 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1993 Op->StartLoc = S;
1994 Op->EndLoc = E;
1995 return Op;
1996 }
1997
1998 static std::unique_ptr<AArch64Operand>
1999 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2000 SMLoc S, SMLoc E, MCContext &Ctx,
2001 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2002 unsigned ShiftAmount = 0,
2003 unsigned HasExplicitAmount = false) {
2004 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2005 Kind == RegKind::SVEPredicateVector) &&
2006 "Invalid vector kind");
2007 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2008 HasExplicitAmount);
2009 Op->Reg.ElementWidth = ElementWidth;
2010 return Op;
2011 }
2012
2013 static std::unique_ptr<AArch64Operand>
2014 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
2015 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
2016 MCContext &Ctx) {
2017 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2018 Op->VectorList.RegNum = RegNum;
2019 Op->VectorList.Count = Count;
2020 Op->VectorList.NumElements = NumElements;
2021 Op->VectorList.ElementWidth = ElementWidth;
2022 Op->VectorList.RegisterKind = RegisterKind;
2023 Op->StartLoc = S;
2024 Op->EndLoc = E;
2025 return Op;
2026 }
2027
2028 static std::unique_ptr<AArch64Operand>
2029 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2030 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2031 Op->VectorIndex.Val = Idx;
2032 Op->StartLoc = S;
2033 Op->EndLoc = E;
2034 return Op;
2035 }
2036
2037 static std::unique_ptr<AArch64Operand>
2038 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2039 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2040 Op->MatrixTileList.RegMask = RegMask;
2041 Op->StartLoc = S;
2042 Op->EndLoc = E;
2043 return Op;
2044 }
2045
2046 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2047 const unsigned ElementWidth) {
2048 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2049 RegMap = {
2050 {{0, AArch64::ZAB0},
2051 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2052 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2053 {{8, AArch64::ZAB0},
2054 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2055 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2056 {{16, AArch64::ZAH0},
2057 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2058 {{16, AArch64::ZAH1},
2059 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2060 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2061 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2062 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2063 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2064 };
2065
2066 if (ElementWidth == 64)
2067 OutRegs.insert(Reg);
2068 else {
2069 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2070 assert(!Regs.empty() && "Invalid tile or element width!");
2071 for (auto OutReg : Regs)
2072 OutRegs.insert(OutReg);
2073 }
2074 }
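// Annotation: per the map above, narrower SME tiles are expressed through
// the 64-bit tiles they occupy; e.g. the 32-bit tile ZAS1 aliases ZAD1
// and ZAD5, so an access to ZAS1 reports both as clobbered.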
2075
2076 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2077 SMLoc E, MCContext &Ctx) {
2078 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2079 Op->Imm.Val = Val;
2080 Op->StartLoc = S;
2081 Op->EndLoc = E;
2082 return Op;
2083 }
2084
2085 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2086 unsigned ShiftAmount,
2087 SMLoc S, SMLoc E,
2088 MCContext &Ctx) {
2089 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2090 Op->ShiftedImm.Val = Val;
2091 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2092 Op->StartLoc = S;
2093 Op->EndLoc = E;
2094 return Op;
2095 }
2096
2097 static std::unique_ptr<AArch64Operand>
2098 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2099 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2100 Op->CondCode.Code = Code;
2101 Op->StartLoc = S;
2102 Op->EndLoc = E;
2103 return Op;
2104 }
2105
2106 static std::unique_ptr<AArch64Operand>
2107 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2108 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2109 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2110 Op->FPImm.IsExact = IsExact;
2111 Op->StartLoc = S;
2112 Op->EndLoc = S;
2113 return Op;
2114 }
2115
2116 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2117 StringRef Str,
2118 SMLoc S,
2119 MCContext &Ctx,
2120 bool HasnXSModifier) {
2121 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2122 Op->Barrier.Val = Val;
2123 Op->Barrier.Data = Str.data();
2124 Op->Barrier.Length = Str.size();
2125 Op->Barrier.HasnXSModifier = HasnXSModifier;
2126 Op->StartLoc = S;
2127 Op->EndLoc = S;
2128 return Op;
2129 }
2130
2131 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2132 uint32_t MRSReg,
2133 uint32_t MSRReg,
2134 uint32_t PStateField,
2135 MCContext &Ctx) {
2136 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2137 Op->SysReg.Data = Str.data();
2138 Op->SysReg.Length = Str.size();
2139 Op->SysReg.MRSReg = MRSReg;
2140 Op->SysReg.MSRReg = MSRReg;
2141 Op->SysReg.PStateField = PStateField;
2142 Op->StartLoc = S;
2143 Op->EndLoc = S;
2144 return Op;
2145 }
2146
2147 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2148 SMLoc E, MCContext &Ctx) {
2149 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2150 Op->SysCRImm.Val = Val;
2151 Op->StartLoc = S;
2152 Op->EndLoc = E;
2153 return Op;
2154 }
2155
2156 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2157 StringRef Str,
2158 SMLoc S,
2159 MCContext &Ctx) {
2160 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2161 Op->Prefetch.Val = Val;
2162 Op->Prefetch.Data = Str.data();
2163 Op->Prefetch.Length = Str.size();
2164 Op->StartLoc = S;
2165 Op->EndLoc = S;
2166 return Op;
2167 }
2168
2169 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2170 StringRef Str,
2171 SMLoc S,
2172 MCContext &Ctx) {
2173 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2174 Op->PSBHint.Val = Val;
2175 Op->PSBHint.Data = Str.data();
2176 Op->PSBHint.Length = Str.size();
2177 Op->StartLoc = S;
2178 Op->EndLoc = S;
2179 return Op;
2180 }
2181
2182 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2183 StringRef Str,
2184 SMLoc S,
2185 MCContext &Ctx) {
2186 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2187 Op->BTIHint.Val = Val | 32;
2188 Op->BTIHint.Data = Str.data();
2189 Op->BTIHint.Length = Str.size();
2190 Op->StartLoc = S;
2191 Op->EndLoc = S;
2192 return Op;
2193 }
2194
2195 static std::unique_ptr<AArch64Operand>
2196 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2197 SMLoc S, SMLoc E, MCContext &Ctx) {
2198 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2199 Op->MatrixReg.RegNum = RegNum;
2200 Op->MatrixReg.ElementWidth = ElementWidth;
2201 Op->MatrixReg.Kind = Kind;
2202 Op->StartLoc = S;
2203 Op->EndLoc = E;
2204 return Op;
2205 }
2206
2207 static std::unique_ptr<AArch64Operand>
2208 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2209 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2210 Op->SVCR.PStateField = PStateField;
2211 Op->SVCR.Data = Str.data();
2212 Op->SVCR.Length = Str.size();
2213 Op->StartLoc = S;
2214 Op->EndLoc = S;
2215 return Op;
2216 }
2217
2218 static std::unique_ptr<AArch64Operand>
2219 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2220 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2221 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2222 Op->ShiftExtend.Type = ShOp;
2223 Op->ShiftExtend.Amount = Val;
2224 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2225 Op->StartLoc = S;
2226 Op->EndLoc = E;
2227 return Op;
2228 }
2229};
2230
2231} // end anonymous namespace.
2232
2233void AArch64Operand::print(raw_ostream &OS) const {
2234 switch (Kind) {
2235 case k_FPImm:
2236 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2237 if (!getFPImmIsExact())
2238 OS << " (inexact)";
2239 OS << ">";
2240 break;
2241 case k_Barrier: {
2242 StringRef Name = getBarrierName();
2243 if (!Name.empty())
2244 OS << "<barrier " << Name << ">";
2245 else
2246 OS << "<barrier invalid #" << getBarrier() << ">";
2247 break;
2248 }
2249 case k_Immediate:
2250 OS << *getImm();
2251 break;
2252 case k_ShiftedImm: {
2253 unsigned Shift = getShiftedImmShift();
2254 OS << "<shiftedimm ";
2255 OS << *getShiftedImmVal();
2256 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2257 break;
2258 }
2259 case k_CondCode:
2260 OS << "<condcode " << getCondCode() << ">";
2261 break;
2262 case k_VectorList: {
2263 OS << "<vectorlist ";
2264 unsigned Reg = getVectorListStart();
2265 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2266 OS << Reg + i << " ";
2267 OS << ">";
2268 break;
2269 }
2270 case k_VectorIndex:
2271 OS << "<vectorindex " << getVectorIndex() << ">";
2272 break;
2273 case k_SysReg:
2274 OS << "<sysreg: " << getSysReg() << '>';
2275 break;
2276 case k_Token:
2277 OS << "'" << getToken() << "'";
2278 break;
2279 case k_SysCR:
2280 OS << "c" << getSysCR();
2281 break;
2282 case k_Prefetch: {
2283 StringRef Name = getPrefetchName();
2284 if (!Name.empty())
2285 OS << "<prfop " << Name << ">";
2286 else
2287 OS << "<prfop invalid #" << getPrefetch() << ">";
2288 break;
2289 }
2290 case k_PSBHint:
2291 OS << getPSBHintName();
2292 break;
2293 case k_BTIHint:
2294 OS << getBTIHintName();
2295 break;
2296 case k_MatrixRegister:
2297 OS << "<matrix " << getMatrixReg() << ">";
2298 break;
2299 case k_MatrixTileList: {
2300 OS << "<matrixlist ";
2301 unsigned RegMask = getMatrixTileListRegMask();
2302 unsigned MaxBits = 8;
2303 for (unsigned I = MaxBits; I > 0; --I)
2304 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2305 OS << '>';
2306 break;
2307 }
2308 case k_SVCR: {
2309 OS << getSVCR();
2310 break;
2311 }
2312 case k_Register:
2313 OS << "<register " << getReg() << ">";
2314 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2315 break;
2316 [[fallthrough]];
2317 case k_ShiftExtend:
2318 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2319 << getShiftExtendAmount();
2320 if (!hasShiftExtendAmount())
2321 OS << "<imp>";
2322 OS << '>';
2323 break;
2324 }
2325}
2326
2327/// @name Auto-generated Match Functions
2328/// {
2329
2330static unsigned MatchRegisterName(StringRef Name);
2331
2332/// }
2333
2334static unsigned MatchNeonVectorRegName(StringRef Name) {
2335 return StringSwitch<unsigned>(Name.lower())
2336 .Case("v0", AArch64::Q0)
2337 .Case("v1", AArch64::Q1)
2338 .Case("v2", AArch64::Q2)
2339 .Case("v3", AArch64::Q3)
2340 .Case("v4", AArch64::Q4)
2341 .Case("v5", AArch64::Q5)
2342 .Case("v6", AArch64::Q6)
2343 .Case("v7", AArch64::Q7)
2344 .Case("v8", AArch64::Q8)
2345 .Case("v9", AArch64::Q9)
2346 .Case("v10", AArch64::Q10)
2347 .Case("v11", AArch64::Q11)
2348 .Case("v12", AArch64::Q12)
2349 .Case("v13", AArch64::Q13)
2350 .Case("v14", AArch64::Q14)
2351 .Case("v15", AArch64::Q15)
2352 .Case("v16", AArch64::Q16)
2353 .Case("v17", AArch64::Q17)
2354 .Case("v18", AArch64::Q18)
2355 .Case("v19", AArch64::Q19)
2356 .Case("v20", AArch64::Q20)
2357 .Case("v21", AArch64::Q21)
2358 .Case("v22", AArch64::Q22)
2359 .Case("v23", AArch64::Q23)
2360 .Case("v24", AArch64::Q24)
2361 .Case("v25", AArch64::Q25)
2362 .Case("v26", AArch64::Q26)
2363 .Case("v27", AArch64::Q27)
2364 .Case("v28", AArch64::Q28)
2365 .Case("v29", AArch64::Q29)
2366 .Case("v30", AArch64::Q30)
2367 .Case("v31", AArch64::Q31)
2368 .Default(0);
2369}
2370
2371/// Returns an optional pair of (#elements, element-width) if Suffix
2372/// is a valid vector kind. Where the number of elements in a vector
2373/// or the vector width is implicit or explicitly unknown (but still a
2374/// valid suffix kind), 0 is used.
2375static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2376 RegKind VectorKind) {
2377 std::pair<int, int> Res = {-1, -1};
2378
2379 switch (VectorKind) {
2380 case RegKind::NeonVector:
2381 Res =
2382 StringSwitch<std::pair<int, int>>(Suffix.lower())
2383 .Case("", {0, 0})
2384 .Case(".1d", {1, 64})
2385 .Case(".1q", {1, 128})
2386 // '.2h' needed for fp16 scalar pairwise reductions
2387 .Case(".2h", {2, 16})
2388 .Case(".2s", {2, 32})
2389 .Case(".2d", {2, 64})
2390 // '.4b' is another special case for the ARMv8.2a dot product
2391 // operand
2392 .Case(".4b", {4, 8})
2393 .Case(".4h", {4, 16})
2394 .Case(".4s", {4, 32})
2395 .Case(".8b", {8, 8})
2396 .Case(".8h", {8, 16})
2397 .Case(".16b", {16, 8})
2398 // Accept the width neutral ones, too, for verbose syntax. If those
2399 // aren't used in the right places, the token operand won't match so
2400 // all will work out.
2401 .Case(".b", {0, 8})
2402 .Case(".h", {0, 16})
2403 .Case(".s", {0, 32})
2404 .Case(".d", {0, 64})
2405 .Default({-1, -1});
2406 break;
2407 case RegKind::SVEPredicateVector:
2408 case RegKind::SVEDataVector:
2409 case RegKind::Matrix:
2410 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2411 .Case("", {0, 0})
2412 .Case(".b", {0, 8})
2413 .Case(".h", {0, 16})
2414 .Case(".s", {0, 32})
2415 .Case(".d", {0, 64})
2416 .Case(".q", {0, 128})
2417 .Default({-1, -1});
2418 break;
2419 default:
2420 llvm_unreachable("Unsupported RegKind");
2421 }
2422
2423 if (Res == std::make_pair(-1, -1))
2424 return Optional<std::pair<int, int>>();
2425
2426 return Optional<std::pair<int, int>>(Res);
2427}
2428
2429static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2430 return parseVectorKind(Suffix, VectorKind).has_value();
2431}
2432
2433static unsigned matchSVEDataVectorRegName(StringRef Name) {
2434 return StringSwitch<unsigned>(Name.lower())
2435 .Case("z0", AArch64::Z0)
2436 .Case("z1", AArch64::Z1)
2437 .Case("z2", AArch64::Z2)
2438 .Case("z3", AArch64::Z3)
2439 .Case("z4", AArch64::Z4)
2440 .Case("z5", AArch64::Z5)
2441 .Case("z6", AArch64::Z6)
2442 .Case("z7", AArch64::Z7)
2443 .Case("z8", AArch64::Z8)
2444 .Case("z9", AArch64::Z9)
2445 .Case("z10", AArch64::Z10)
2446 .Case("z11", AArch64::Z11)
2447 .Case("z12", AArch64::Z12)
2448 .Case("z13", AArch64::Z13)
2449 .Case("z14", AArch64::Z14)
2450 .Case("z15", AArch64::Z15)
2451 .Case("z16", AArch64::Z16)
2452 .Case("z17", AArch64::Z17)
2453 .Case("z18", AArch64::Z18)
2454 .Case("z19", AArch64::Z19)
2455 .Case("z20", AArch64::Z20)
2456 .Case("z21", AArch64::Z21)
2457 .Case("z22", AArch64::Z22)
2458 .Case("z23", AArch64::Z23)
2459 .Case("z24", AArch64::Z24)
2460 .Case("z25", AArch64::Z25)
2461 .Case("z26", AArch64::Z26)
2462 .Case("z27", AArch64::Z27)
2463 .Case("z28", AArch64::Z28)
2464 .Case("z29", AArch64::Z29)
2465 .Case("z30", AArch64::Z30)
2466 .Case("z31", AArch64::Z31)
2467 .Default(0);
2468}
2469
2470static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2471 return StringSwitch<unsigned>(Name.lower())
2472 .Case("p0", AArch64::P0)
2473 .Case("p1", AArch64::P1)
2474 .Case("p2", AArch64::P2)
2475 .Case("p3", AArch64::P3)
2476 .Case("p4", AArch64::P4)
2477 .Case("p5", AArch64::P5)
2478 .Case("p6", AArch64::P6)
2479 .Case("p7", AArch64::P7)
2480 .Case("p8", AArch64::P8)
2481 .Case("p9", AArch64::P9)
2482 .Case("p10", AArch64::P10)
2483 .Case("p11", AArch64::P11)
2484 .Case("p12", AArch64::P12)
2485 .Case("p13", AArch64::P13)
2486 .Case("p14", AArch64::P14)
2487 .Case("p15", AArch64::P15)
2488 .Default(0);
2489}
2490
2491static unsigned matchMatrixTileListRegName(StringRef Name) {
2492 return StringSwitch<unsigned>(Name.lower())
2493 .Case("za0.d", AArch64::ZAD0)
2494 .Case("za1.d", AArch64::ZAD1)
2495 .Case("za2.d", AArch64::ZAD2)
2496 .Case("za3.d", AArch64::ZAD3)
2497 .Case("za4.d", AArch64::ZAD4)
2498 .Case("za5.d", AArch64::ZAD5)
2499 .Case("za6.d", AArch64::ZAD6)
2500 .Case("za7.d", AArch64::ZAD7)
2501 .Case("za0.s", AArch64::ZAS0)
2502 .Case("za1.s", AArch64::ZAS1)
2503 .Case("za2.s", AArch64::ZAS2)
2504 .Case("za3.s", AArch64::ZAS3)
2505 .Case("za0.h", AArch64::ZAH0)
2506 .Case("za1.h", AArch64::ZAH1)
2507 .Case("za0.b", AArch64::ZAB0)
2508 .Default(0);
2509}
2510
2511static unsigned matchMatrixRegName(StringRef Name) {
2512 return StringSwitch<unsigned>(Name.lower())
2513 .Case("za", AArch64::ZA)
2514 .Case("za0.q", AArch64::ZAQ0)
2515 .Case("za1.q", AArch64::ZAQ1)
2516 .Case("za2.q", AArch64::ZAQ2)
2517 .Case("za3.q", AArch64::ZAQ3)
2518 .Case("za4.q", AArch64::ZAQ4)
2519 .Case("za5.q", AArch64::ZAQ5)
2520 .Case("za6.q", AArch64::ZAQ6)
2521 .Case("za7.q", AArch64::ZAQ7)
2522 .Case("za8.q", AArch64::ZAQ8)
2523 .Case("za9.q", AArch64::ZAQ9)
2524 .Case("za10.q", AArch64::ZAQ10)
2525 .Case("za11.q", AArch64::ZAQ11)
2526 .Case("za12.q", AArch64::ZAQ12)
2527 .Case("za13.q", AArch64::ZAQ13)
2528 .Case("za14.q", AArch64::ZAQ14)
2529 .Case("za15.q", AArch64::ZAQ15)
2530 .Case("za0.d", AArch64::ZAD0)
2531 .Case("za1.d", AArch64::ZAD1)
2532 .Case("za2.d", AArch64::ZAD2)
2533 .Case("za3.d", AArch64::ZAD3)
2534 .Case("za4.d", AArch64::ZAD4)
2535 .Case("za5.d", AArch64::ZAD5)
2536 .Case("za6.d", AArch64::ZAD6)
2537 .Case("za7.d", AArch64::ZAD7)
2538 .Case("za0.s", AArch64::ZAS0)
2539 .Case("za1.s", AArch64::ZAS1)
2540 .Case("za2.s", AArch64::ZAS2)
2541 .Case("za3.s", AArch64::ZAS3)
2542 .Case("za0.h", AArch64::ZAH0)
2543 .Case("za1.h", AArch64::ZAH1)
2544 .Case("za0.b", AArch64::ZAB0)
2545 .Case("za0h.q", AArch64::ZAQ0)
2546 .Case("za1h.q", AArch64::ZAQ1)
2547 .Case("za2h.q", AArch64::ZAQ2)
2548 .Case("za3h.q", AArch64::ZAQ3)
2549 .Case("za4h.q", AArch64::ZAQ4)
2550 .Case("za5h.q", AArch64::ZAQ5)
2551 .Case("za6h.q", AArch64::ZAQ6)
2552 .Case("za7h.q", AArch64::ZAQ7)
2553 .Case("za8h.q", AArch64::ZAQ8)
2554 .Case("za9h.q", AArch64::ZAQ9)
2555 .Case("za10h.q", AArch64::ZAQ10)
2556 .Case("za11h.q", AArch64::ZAQ11)
2557 .Case("za12h.q", AArch64::ZAQ12)
2558 .Case("za13h.q", AArch64::ZAQ13)
2559 .Case("za14h.q", AArch64::ZAQ14)
2560 .Case("za15h.q", AArch64::ZAQ15)
2561 .Case("za0h.d", AArch64::ZAD0)
2562 .Case("za1h.d", AArch64::ZAD1)
2563 .Case("za2h.d", AArch64::ZAD2)
2564 .Case("za3h.d", AArch64::ZAD3)
2565 .Case("za4h.d", AArch64::ZAD4)
2566 .Case("za5h.d", AArch64::ZAD5)
2567 .Case("za6h.d", AArch64::ZAD6)
2568 .Case("za7h.d", AArch64::ZAD7)
2569 .Case("za0h.s", AArch64::ZAS0)
2570 .Case("za1h.s", AArch64::ZAS1)
2571 .Case("za2h.s", AArch64::ZAS2)
2572 .Case("za3h.s", AArch64::ZAS3)
2573 .Case("za0h.h", AArch64::ZAH0)
2574 .Case("za1h.h", AArch64::ZAH1)
2575 .Case("za0h.b", AArch64::ZAB0)
2576 .Case("za0v.q", AArch64::ZAQ0)
2577 .Case("za1v.q", AArch64::ZAQ1)
2578 .Case("za2v.q", AArch64::ZAQ2)
2579 .Case("za3v.q", AArch64::ZAQ3)
2580 .Case("za4v.q", AArch64::ZAQ4)
2581 .Case("za5v.q", AArch64::ZAQ5)
2582 .Case("za6v.q", AArch64::ZAQ6)
2583 .Case("za7v.q", AArch64::ZAQ7)
2584 .Case("za8v.q", AArch64::ZAQ8)
2585 .Case("za9v.q", AArch64::ZAQ9)
2586 .Case("za10v.q", AArch64::ZAQ10)
2587 .Case("za11v.q", AArch64::ZAQ11)
2588 .Case("za12v.q", AArch64::ZAQ12)
2589 .Case("za13v.q", AArch64::ZAQ13)
2590 .Case("za14v.q", AArch64::ZAQ14)
2591 .Case("za15v.q", AArch64::ZAQ15)
2592 .Case("za0v.d", AArch64::ZAD0)
2593 .Case("za1v.d", AArch64::ZAD1)
2594 .Case("za2v.d", AArch64::ZAD2)
2595 .Case("za3v.d", AArch64::ZAD3)
2596 .Case("za4v.d", AArch64::ZAD4)
2597 .Case("za5v.d", AArch64::ZAD5)
2598 .Case("za6v.d", AArch64::ZAD6)
2599 .Case("za7v.d", AArch64::ZAD7)
2600 .Case("za0v.s", AArch64::ZAS0)
2601 .Case("za1v.s", AArch64::ZAS1)
2602 .Case("za2v.s", AArch64::ZAS2)
2603 .Case("za3v.s", AArch64::ZAS3)
2604 .Case("za0v.h", AArch64::ZAH0)
2605 .Case("za1v.h", AArch64::ZAH1)
2606 .Case("za0v.b", AArch64::ZAB0)
2607 .Default(0);
2608}
2609
2610bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2611 SMLoc &EndLoc) {
2612 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
Step 27: Calling 'AArch64AsmParser::tryParseRegister'
Step 33: Returning from 'AArch64AsmParser::tryParseRegister'
Step 34: Returning without writing to 'RegNo'
2613}
2614
2615OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2616 SMLoc &StartLoc,
2617 SMLoc &EndLoc) {
2618 StartLoc = getLoc();
2619 auto Res = tryParseScalarRegister(RegNo);
Step 28: Calling 'AArch64AsmParser::tryParseScalarRegister'
Step 31: Returning from 'AArch64AsmParser::tryParseScalarRegister'
2620 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2621 return Res;
Step 32: Returning without writing to 'RegNo'
2622}
2623
2624// Matches a register name or register alias previously defined by '.req'
2625unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2626 RegKind Kind) {
2627 unsigned RegNum = 0;
2628 if ((RegNum = matchSVEDataVectorRegName(Name)))
2629 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2630
2631 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2632 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2633
2634 if ((RegNum = MatchNeonVectorRegName(Name)))
2635 return Kind == RegKind::NeonVector ? RegNum : 0;
2636
2637 if ((RegNum = matchMatrixRegName(Name)))
2638 return Kind == RegKind::Matrix ? RegNum : 0;
2639
2640 // The parsed register must be of RegKind Scalar
2641 if ((RegNum = MatchRegisterName(Name)))
2642 return Kind == RegKind::Scalar ? RegNum : 0;
2643
2644 if (!RegNum) {
2645 // Handle a few common aliases of registers.
2646 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2647 .Case("fp", AArch64::FP)
2648 .Case("lr", AArch64::LR)
2649 .Case("x31", AArch64::XZR)
2650 .Case("w31", AArch64::WZR)
2651 .Default(0))
2652 return Kind == RegKind::Scalar ? RegNum : 0;
2653
2654 // Check for aliases registered via .req. Canonicalize to lower case.
2655 // That's more consistent since register names are case insensitive, and
2656 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2657 auto Entry = RegisterReqs.find(Name.lower());
2658 if (Entry == RegisterReqs.end())
2659 return 0;
2660
2661 // Set RegNum if the match is the right kind of register.
2662 if (Kind == Entry->getValue().first)
2663 RegNum = Entry->getValue().second;
2664 }
2665 return RegNum;
2666}
2667
2668/// tryParseScalarRegister - Try to parse a scalar register name. If the
2669/// current token names a scalar register, the token is eaten and RegNum is
2670/// set; otherwise the token is left alone and no match is reported.
2671OperandMatchResultTy
2672AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2673 const AsmToken &Tok = getTok();
2674 if (Tok.isNot(AsmToken::Identifier))
Step 29: Taking true branch
2675 return MatchOperand_NoMatch;
Step 30: Returning without writing to 'Reg'
2676
2677 std::string lowerCase = Tok.getString().lower();
2678 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2679 if (Reg == 0)
2680 return MatchOperand_NoMatch;
2681
2682 RegNum = Reg;
2683 Lex(); // Eat identifier token.
2684 return MatchOperand_Success;
2685}
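The numbered path steps above are the point of this report: when tryParseScalarRegister takes the early MatchOperand_NoMatch return, 'RegNum' is never written, and that unwritten value flows back through tryParseRegister and ParseRegister to the caller. A minimal standalone sketch of the out-parameter pattern and the conventional guard (hypothetical names, plain C++, not this file's code):

#include <cstdio>

enum Result { Success, NoMatch };

// Mirrors the parser's shape: the out-parameter is written only on success.
Result tryParse(bool TokenIsIdentifier, unsigned &RegNo) {
  if (!TokenIsIdentifier)
    return NoMatch;          // early return: RegNo left unwritten
  RegNo = 42;                // ... real matching elided ...
  return Success;
}

int main() {
  unsigned RegNo;            // uninitialized, as in the report's path
  if (tryParse(false, RegNo) != Success)
    RegNo = 0;               // guard: give the out-parameter a defined value
  std::printf("RegNo = %u\n", RegNo);
}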
2686
2687/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2688OperandMatchResultTy
2689AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2690 SMLoc S = getLoc();
2691
2692 if (getTok().isNot(AsmToken::Identifier)) {
2693 Error(S, "Expected cN operand where 0 <= N <= 15");
2694 return MatchOperand_ParseFail;
2695 }
2696
2697 StringRef Tok = getTok().getIdentifier();
2698 if (Tok[0] != 'c' && Tok[0] != 'C') {
2699 Error(S, "Expected cN operand where 0 <= N <= 15");
2700 return MatchOperand_ParseFail;
2701 }
2702
2703 uint32_t CRNum;
2704 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2705 if (BadNum || CRNum > 15) {
2706 Error(S, "Expected cN operand where 0 <= N <= 15");
2707 return MatchOperand_ParseFail;
2708 }
2709
2710 Lex(); // Eat identifier token.
2711 Operands.push_back(
2712 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2713 return MatchOperand_Success;
2714}
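The cN check in tryParseSysCROperand amounts to: a leading 'c' or 'C', then a base-10 integer in [0, 15]. A standalone equivalent using only the standard library (the helper name is illustrative):

#include <charconv>
#include <optional>
#include <string_view>
#include <system_error>

// Returns N for a token of the form "cN"/"CN" with 0 <= N <= 15, else empty.
std::optional<unsigned> parseSysCR(std::string_view Tok) {
  if (Tok.size() < 2 || (Tok[0] != 'c' && Tok[0] != 'C'))
    return std::nullopt;
  unsigned N = 0;
  auto [Ptr, Ec] =
      std::from_chars(Tok.data() + 1, Tok.data() + Tok.size(), N, 10);
  if (Ec != std::errc() || Ptr != Tok.data() + Tok.size() || N > 15)
    return std::nullopt;
  return N;  // e.g. "c7" -> 7; "c16" and "cx" -> empty
}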
2715
2716/// tryParsePrefetch - Try to parse a prefetch operand.
2717template <bool IsSVEPrefetch>
2718OperandMatchResultTy
2719AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2720 SMLoc S = getLoc();
2721 const AsmToken &Tok = getTok();
2722
2723 auto LookupByName = [](StringRef N) {
2724 if (IsSVEPrefetch) {
2725 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2726 return Optional<unsigned>(Res->Encoding);
2727 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2728 return Optional<unsigned>(Res->Encoding);
2729 return Optional<unsigned>();
2730 };
2731
2732 auto LookupByEncoding = [](unsigned E) {
2733 if (IsSVEPrefetch) {
2734 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2735 return Optional<StringRef>(Res->Name);
2736 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2737 return Optional<StringRef>(Res->Name);
2738 return Optional<StringRef>();
2739 };
2740 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2741
2742 // Either an identifier for named values or a 5-bit immediate.
2743 // Eat optional hash.
2744 if (parseOptionalToken(AsmToken::Hash) ||
2745 Tok.is(AsmToken::Integer)) {
2746 const MCExpr *ImmVal;
2747 if (getParser().parseExpression(ImmVal))
2748 return MatchOperand_ParseFail;
2749
2750 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2751 if (!MCE) {
2752 TokError("immediate value expected for prefetch operand");
2753 return MatchOperand_ParseFail;
2754 }
2755 unsigned prfop = MCE->getValue();
2756 if (prfop > MaxVal) {
2757 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2758 "] expected");
2759 return MatchOperand_ParseFail;
2760 }
2761
2762 auto PRFM = LookupByEncoding(MCE->getValue());
2763 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
2764 S, getContext()));
2765 return MatchOperand_Success;
2766 }
2767
2768 if (Tok.isNot(AsmToken::Identifier)) {
2769 TokError("prefetch hint expected");
2770 return MatchOperand_ParseFail;
2771 }
2772
2773 auto PRFM = LookupByName(Tok.getString());
2774 if (!PRFM) {
2775 TokError("prefetch hint expected");
2776 return MatchOperand_ParseFail;
2777 }
2778
2779 Operands.push_back(AArch64Operand::CreatePrefetch(
2780 *PRFM, Tok.getString(), S, getContext()));
2781 Lex(); // Eat identifier token.
2782 return MatchOperand_Success;
2783}
2784
2785/// tryParsePSBHint - Try to parse a PSB operand, mapped to a HINT instruction.
2786OperandMatchResultTy
2787AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2788 SMLoc S = getLoc();
2789 const AsmToken &Tok = getTok();
2790 if (Tok.isNot(AsmToken::Identifier)) {
2791 TokError("invalid operand for instruction");
2792 return MatchOperand_ParseFail;
2793 }
2794
2795 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2796 if (!PSB) {
2797 TokError("invalid operand for instruction");
2798 return MatchOperand_ParseFail;
2799 }
2800
2801 Operands.push_back(AArch64Operand::CreatePSBHint(
2802 PSB->Encoding, Tok.getString(), S, getContext()));
2803 Lex(); // Eat identifier token.
2804 return MatchOperand_Success;
2805}
2806
2807/// tryParseBTIHint - Try to parse a BTI operand, mapped to a HINT instruction.
2808OperandMatchResultTy
2809AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2810 SMLoc S = getLoc();
2811 const AsmToken &Tok = getTok();
2812 if (Tok.isNot(AsmToken::Identifier)) {
2813 TokError("invalid operand for instruction");
2814 return MatchOperand_ParseFail;
2815 }
2816
2817 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2818 if (!BTI) {
2819 TokError("invalid operand for instruction");
2820 return MatchOperand_ParseFail;
2821 }
2822
2823 Operands.push_back(AArch64Operand::CreateBTIHint(
2824 BTI->Encoding, Tok.getString(), S, getContext()));
2825 Lex(); // Eat identifier token.
2826 return MatchOperand_Success;
2827}
2828
2829/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2830/// instruction.
2831OperandMatchResultTy
2832AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2833 SMLoc S = getLoc();
2834 const MCExpr *Expr = nullptr;
2835
2836 if (getTok().is(AsmToken::Hash)) {
2837 Lex(); // Eat hash token.
2838 }
2839
2840 if (parseSymbolicImmVal(Expr))
2841 return MatchOperand_ParseFail;
2842
2843 AArch64MCExpr::VariantKind ELFRefKind;
2844 MCSymbolRefExpr::VariantKind DarwinRefKind;
2845 int64_t Addend;
2846 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2847 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2848 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2849 // No modifier was specified at all; this is the syntax for an ELF basic
2850 // ADRP relocation (unfortunately).
2851 Expr =
2852 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2853 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2854 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2855 Addend != 0) {
2856 Error(S, "gotpage label reference not allowed an addend");
2857 return MatchOperand_ParseFail;
2858 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2859 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2860 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2861 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2862 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2863 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
2864 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2865 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2866 // The operand must be an @page or @gotpage qualified symbolref.
2867 Error(S, "page or gotpage label reference expected");
2868 return MatchOperand_ParseFail;
2869 }
2870 }
2871
2872 // We have either a label reference possibly with addend or an immediate. The
2873 // addend is a raw value here. The linker will adjust it to only reference the
2874 // page.
2875 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2876 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2877
2878 return MatchOperand_Success;
2879}
2880
2881/// tryParseAdrLabel - Parse and validate a source label for the ADR
2882/// instruction.
2883OperandMatchResultTy
2884AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2885 SMLoc S = getLoc();
2886 const MCExpr *Expr = nullptr;
2887
2888 // Leave anything with a bracket to the default for SVE
2889 if (getTok().is(AsmToken::LBrac))
2890 return MatchOperand_NoMatch;
2891
2892 if (getTok().is(AsmToken::Hash))
2893 Lex(); // Eat hash token.
2894
2895 if (parseSymbolicImmVal(Expr))
2896 return MatchOperand_ParseFail;
2897
2898 AArch64MCExpr::VariantKind ELFRefKind;
2899 MCSymbolRefExpr::VariantKind DarwinRefKind;
2900 int64_t Addend;
2901 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2902 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2903 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2904 // No modifier was specified at all; this is the syntax for an ELF basic
2905 // ADR relocation (unfortunately).
2906 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2907 } else {
2908 Error(S, "unexpected adr label");
2909 return MatchOperand_ParseFail;
2910 }
2911 }
2912
2913 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2914 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2915 return MatchOperand_Success;
2916}
2917
2918/// tryParseFPImm - Parse a floating-point immediate expression operand.
2919template<bool AddFPZeroAsLiteral>
2920OperandMatchResultTy
2921AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2922 SMLoc S = getLoc();
2923
2924 bool Hash = parseOptionalToken(AsmToken::Hash);
2925
2926 // Handle negation, as that still comes through as a separate token.
2927 bool isNegative = parseOptionalToken(AsmToken::Minus);
2928
2929 const AsmToken &Tok = getTok();
2930 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2931 if (!Hash)
2932 return MatchOperand_NoMatch;
2933 TokError("invalid floating point immediate");
2934 return MatchOperand_ParseFail;
2935 }
2936
2937 // Parse hexadecimal representation.
2938 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2939 if (Tok.getIntVal() > 255 || isNegative) {
2940 TokError("encoded floating point value out of range");
2941 return MatchOperand_ParseFail;
2942 }
2943
2944 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2945 Operands.push_back(
2946 AArch64Operand::CreateFPImm(F, true, S, getContext()));
2947 } else {
2948 // Parse FP representation.
2949 APFloat RealVal(APFloat::IEEEdouble());
2950 auto StatusOrErr =
2951 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2952 if (errorToBool(StatusOrErr.takeError())) {
2953 TokError("invalid floating point representation");
2954 return MatchOperand_ParseFail;
2955 }
2956
2957 if (isNegative)
2958 RealVal.changeSign();
2959
2960 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2961 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
2962 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
2963 } else
2964 Operands.push_back(AArch64Operand::CreateFPImm(
2965 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2966 }
2967
2968 Lex(); // Eat the token.
2969
2970 return MatchOperand_Success;
2971}
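The hexadecimal branch above accepts only values 0-255 because AArch64 packs FMOV-style floating-point immediates into 8 bits (1 sign, 3 exponent, 4 fraction bits). A standalone decoder, equivalent in spirit to AArch64_AM::getFPImmFloat and assuming that standard imm8 layout:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Decodes an AArch64 FMOV imm8:
// value = (-1)^sign * (1 + frac/16) * 2^((exp XOR 4) - 3).
double decodeFPImm8(uint8_t Imm) {
  int Sign = (Imm >> 7) & 0x1;
  int Exp  = (Imm >> 4) & 0x7;
  int Frac = Imm & 0xF;
  double Mag = (1.0 + Frac / 16.0) * std::ldexp(1.0, (Exp ^ 4) - 3);
  return Sign ? -Mag : Mag;
}

int main() {
  // imm8 0x70 encodes 1.0; imm8 0x00 encodes 2.0 (0.0 is not encodable).
  std::printf("%g %g\n", decodeFPImm8(0x70), decodeFPImm8(0x00));
}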
2972
2973/// tryParseImmWithOptionalShift - Parse an immediate operand, optionally with
2974/// a shift suffix, for example '#1, lsl #12'.
2975OperandMatchResultTy
2976AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2977 SMLoc S = getLoc();
2978
2979 if (getTok().is(AsmToken::Hash))
2980 Lex(); // Eat '#'
2981 else if (getTok().isNot(AsmToken::Integer))
2982 // The operand should start with '#' or be an integer; otherwise it is no match.
2983 return MatchOperand_NoMatch;
2984
2985 const MCExpr *Imm = nullptr;
2986 if (parseSymbolicImmVal(Imm))
2987 return MatchOperand_ParseFail;
2988 else if (getTok().isNot(AsmToken::Comma)) {
2989 Operands.push_back(
2990 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
2991 return MatchOperand_Success;
2992 }
2993
2994 // Eat ','
2995 Lex();
2996
2997 // The optional operand must be "lsl #N" where N is non-negative.
2998 if (!getTok().is(AsmToken::Identifier) ||
2999 !getTok().getIdentifier().equals_insensitive("lsl")) {
3000 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3001 return MatchOperand_ParseFail;
3002 }
3003
3004 // Eat 'lsl'
3005 Lex();
3006
3007 parseOptionalToken(AsmToken::Hash);
3008
3009 if (getTok().isNot(AsmToken::Integer)) {
3010 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3011 return MatchOperand_ParseFail;
3012 }
3013
3014 int64_t ShiftAmount = getTok().getIntVal();
3015
3016 if (ShiftAmount < 0) {
3017 Error(getLoc(), "positive shift amount required");
3018 return MatchOperand_ParseFail;
3019 }
3020 Lex(); // Eat the number
3021
3022 // Just in case the optional lsl #0 is used for immediates other than zero.
3023 if (ShiftAmount == 0 && Imm != nullptr) {
3024 Operands.push_back(
3025 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3026 return MatchOperand_Success;
3027 }
3028
3029 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3030 getLoc(), getContext()));
3031 return MatchOperand_Success;
3032}
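The '#N, lsl #12' form exists because AArch64 add/sub immediates are a 12-bit value with an optional 12-bit left shift. A standalone sketch of that encodability rule (illustrative helper, not from this file):

#include <cstdint>
#include <optional>
#include <utility>

// Splits Imm into {imm12, shift} for ADD/SUB-style encodings: either Imm
// fits in 12 bits unshifted, or in 12 bits with 'lsl #12'.
std::optional<std::pair<uint32_t, unsigned>> splitAddSubImm(uint64_t Imm) {
  if (Imm <= 0xFFF)
    return std::make_pair(static_cast<uint32_t>(Imm), 0u);        // "#N"
  if ((Imm & 0xFFF) == 0 && (Imm >> 12) <= 0xFFF)
    return std::make_pair(static_cast<uint32_t>(Imm >> 12), 12u); // "#N, lsl #12"
  return std::nullopt;  // not encodable as a shifted 12-bit immediate
}
// e.g. splitAddSubImm(0x1000) -> {1, 12}; splitAddSubImm(0x1001) -> empty.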
3033
3034/// parseCondCodeString - Parse a condition code string, optionally returning
3035/// a suggestion to correct common typos.
3036AArch64CC::CondCode
3037AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3038 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3039 .Case("eq", AArch64CC::EQ)
3040 .Case("ne", AArch64CC::NE)
3041 .Case("cs", AArch64CC::HS)
3042 .Case("hs", AArch64CC::HS)
3043 .Case("cc", AArch64CC::LO)
3044 .Case("lo", AArch64CC::LO)
3045 .Case("mi", AArch64CC::MI)
3046 .Case("pl", AArch64CC::PL)
3047 .Case("vs", AArch64CC::VS)
3048 .Case("vc", AArch64CC::VC)
3049 .Case("hi", AArch64CC::HI)
3050 .Case("ls", AArch64CC::LS)
3051 .Case("ge", AArch64CC::GE)
3052 .Case("lt", AArch64CC::LT)
3053 .Case("gt", AArch64CC::GT)
3054 .Case("le", AArch64CC::LE)
3055 .Case("al", AArch64CC::AL)
3056 .Case("nv", AArch64CC::NV)
3057 .Default(AArch64CC::Invalid);
3058
3059 if (CC == AArch64CC::Invalid &&
3060 getSTI().getFeatureBits()[AArch64::FeatureSVE]) {
3061 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3062 .Case("none", AArch64CC::EQ)
3063 .Case("any", AArch64CC::NE)
3064 .Case("nlast", AArch64CC::HS)
3065 .Case("last", AArch64CC::LO)
3066 .Case("first", AArch64CC::MI)
3067 .Case("nfrst", AArch64CC::PL)
3068 .Case("pmore", AArch64CC::HI)
3069 .Case("plast", AArch64CC::LS)
3070 .Case("tcont", AArch64CC::GE)
3071 .Case("tstop", AArch64CC::LT)
3072 .Default(AArch64CC::Invalid);
3073
3074 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3075 Suggestion = "nfrst";
3076 }
3077 return CC;
3078}
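The SVE spellings in the second StringSwitch are aliases for the NZCV codes in the first ('none' is eq, 'any' is ne, 'last' is lo, and so on), so the lookup is two-level. A minimal standalone sketch of that structure (the integer values are illustrative stand-ins for AArch64CC codes):

#include <map>
#include <string>

// Two-level condition-code lookup: base spellings first, then SVE aliases,
// which are only consulted when the SVE feature is available.
int lookupCondCode(const std::string &Cond, bool HasSVE) {
  static const std::map<std::string, int> Base = {
      {"eq", 0}, {"ne", 1}, {"hs", 2}, {"cs", 2}, {"lo", 3}, {"cc", 3}};
  static const std::map<std::string, int> SVEAlias = {
      {"none", 0}, {"any", 1}, {"nlast", 2}, {"last", 3}};
  if (auto It = Base.find(Cond); It != Base.end())
    return It->second;
  if (HasSVE)
    if (auto It = SVEAlias.find(Cond); It != SVEAlias.end())
      return It->second;
  return -1;  // invalid, like AArch64CC::Invalid above
}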
3079
3080/// parseCondCode - Parse a Condition Code operand.
3081bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3082 bool invertCondCode) {
3083 SMLoc S = getLoc();
3084 const AsmToken &Tok = getTok();
3085 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3086
3087 StringRef Cond = Tok.getString();
3088 std::string Suggestion;
3089 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3090 if (CC == AArch64CC::Invalid) {
3091 std::string Msg = "invalid condition code";
3092 if (!Suggestion.empty())
3093 Msg += ", did you mean " + Suggestion + "?";
3094 return TokError(Msg);
3095 }
3096 Lex(); // Eat identifier token.
3097
3098 if (invertCondCode) {
3099 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3100 return TokError("condition codes AL and NV are invalid for this instruction");
3101 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3102 }
3103
3104 Operands.push_back(
3105 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3106 return false;
3107}
3108
3109OperandMatchResultTy
3110AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3111 const AsmToken &Tok = getTok();
3112 SMLoc S = getLoc();
3113
3114 if (Tok.isNot(AsmToken::Identifier)) {
3115 TokError("invalid operand for instruction");
3116 return MatchOperand_ParseFail;
3117 }
3118
3119 unsigned PStateImm = -1;
3120 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3121 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3122 PStateImm = SVCR->Encoding;
3123
3124 Operands.push_back(
3125 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3126 Lex(); // Eat identifier token.
3127 return MatchOperand_Success;
3128}
3129
3130OperandMatchResultTy
3131AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3132 const AsmToken &Tok = getTok();
3133 SMLoc S = getLoc();
3134
3135 StringRef Name = Tok.getString();
3136
3137 if (Name.equals_insensitive("za")) {
3138 Lex(); // eat "za"
3139 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3140 AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
3141 getContext()));
3142 if (getLexer().is(AsmToken::LBrac)) {
3143 // There's no comma after the matrix operand, so we can parse the next
3144 // operand immediately.
3145 if (parseOperand(Operands, false, false))
3146 return MatchOperand_NoMatch;
3147 }
3148 return MatchOperand_Success;
3149 }
3150
3151 // Try to parse matrix register.
3152 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3153 if (!Reg)
3154 return MatchOperand_NoMatch;
3155
3156 size_t DotPosition = Name.find('.');
3157 assert(DotPosition != StringRef::npos && "Unexpected register");
3158
3159 StringRef Head = Name.take_front(DotPosition);
3160 StringRef Tail = Name.drop_front(DotPosition);
3161 StringRef RowOrColumn = Head.take_back();
3162
3163 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
3164 .Case("h", MatrixKind::Row)
3165 .Case("v", MatrixKind::Col)
3166 .Default(MatrixKind::Tile);
3167
3168 // Next up, parsing the suffix
3169 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3170 if (!KindRes) {
3171 TokError("Expected the register to be followed by element width suffix");
3172 return MatchOperand_ParseFail;
3173 }
3174 unsigned ElementWidth = KindRes->second;
3175
3176 Lex();
3177
3178 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3179 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3180
3181 if (getLexer().is(AsmToken::LBrac)) {
3182 // There's no comma after the matrix operand, so we can parse the next
3183 // operand immediately.
3184 if (parseOperand(Operands, false, false))
3185 return MatchOperand_NoMatch;
3186 }
3187 return MatchOperand_Success;
3188}
3189
3190/// tryParseOptionalShiftExtend - Some operands take an optional shift or
3191/// extend argument. Parse them if present.
3192OperandMatchResultTy
3193AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3194 const AsmToken &Tok = getTok();
3195 std::string LowerID = Tok.getString().lower();
3196 AArch64_AM::ShiftExtendType ShOp =
3197 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3198 .Case("lsl", AArch64_AM::LSL)
3199 .Case("lsr", AArch64_AM::LSR)
3200 .Case("asr", AArch64_AM::ASR)
3201 .Case("ror", AArch64_AM::ROR)
3202 .Case("msl", AArch64_AM::MSL)
3203 .Case("uxtb", AArch64_AM::UXTB)
3204 .Case("uxth", AArch64_AM::UXTH)
3205 .Case("uxtw", AArch64_AM::UXTW)
3206 .Case("uxtx", AArch64_AM::UXTX)
3207 .Case("sxtb", AArch64_AM::SXTB)
3208 .Case("sxth", AArch64_AM::SXTH)
3209 .Case("sxtw", AArch64_AM::SXTW)
3210 .Case("sxtx", AArch64_AM::SXTX)
3211 .Default(AArch64_AM::InvalidShiftExtend);
3212
3213 if (ShOp == AArch64_AM::InvalidShiftExtend)
3214 return MatchOperand_NoMatch;
3215
3216 SMLoc S = Tok.getLoc();
3217 Lex();
3218
3219 bool Hash = parseOptionalToken(AsmToken::Hash);
3220
3221 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3222 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3223 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3224 ShOp == AArch64_AM::MSL) {
3225 // We expect a number here.
3226 TokError("expected #imm after shift specifier");
3227 return MatchOperand_ParseFail;
3228 }
3229
3230 // "extend" type operations don't need an immediate, #0 is implicit.
3231 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3232 Operands.push_back(
3233 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3234 return MatchOperand_Success;
3235 }
3236
3237 // Make sure we do actually have a number, identifier or a parenthesized
3238 // expression.
3239 SMLoc E = getLoc();
3240 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3241 !getTok().is(AsmToken::Identifier)) {
3242 Error(E, "expected integer shift amount");
3243 return MatchOperand_ParseFail;
3244 }
3245
3246 const MCExpr *ImmVal;
3247 if (getParser().parseExpression(ImmVal))
3248 return MatchOperand_ParseFail;
3249
3250 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3251 if (!MCE) {
3252 Error(E, "expected constant '#imm' after shift specifier");
3253 return MatchOperand_ParseFail;
3254 }
3255
3256 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3257 Operands.push_back(AArch64Operand::CreateShiftExtend(
3258 ShOp, MCE->getValue(), true, S, E, getContext()));
3259 return MatchOperand_Success;
3260}
3261
3262static const struct Extension {
3263 const char *Name;
3264 const FeatureBitset Features;
3265} ExtensionMap[] = {
3266 {"crc", {AArch64::FeatureCRC}},
3267 {"sm4", {AArch64::FeatureSM4}},
3268 {"sha3", {AArch64::FeatureSHA3}},
3269 {"sha2", {AArch64::FeatureSHA2}},
3270 {"aes", {AArch64::FeatureAES}},
3271 {"crypto", {AArch64::FeatureCrypto}},
3272 {"fp", {AArch64::FeatureFPARMv8}},
3273 {"simd", {AArch64::FeatureNEON}},
3274 {"ras", {AArch64::FeatureRAS}},
3275 {"lse", {AArch64::FeatureLSE}},
3276 {"predres", {AArch64::FeaturePredRes}},
3277 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3278 {"mte", {AArch64::FeatureMTE}},
3279 {"memtag", {AArch64::FeatureMTE}},
3280 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3281 {"pan", {AArch64::FeaturePAN}},
3282 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3283 {"ccpp", {AArch64::FeatureCCPP}},
3284 {"rcpc", {AArch64::FeatureRCPC}},
3285 {"rng", {AArch64::FeatureRandGen}},
3286 {"sve", {AArch64::FeatureSVE}},
3287 {"sve2", {AArch64::FeatureSVE2}},
3288 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3289 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3290 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3291 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3292 {"ls64", {AArch64::FeatureLS64}},
3293 {"xs", {AArch64::FeatureXS}},
3294 {"pauth", {AArch64::FeaturePAuth}},
3295 {"flagm", {AArch64::FeatureFlagM}},
3296 {"rme", {AArch64::FeatureRME}},
3297 {"sme", {AArch64::FeatureSME}},
3298 {"sme-f64", {AArch64::FeatureSMEF64}},
3299 {"sme-i64", {AArch64::FeatureSMEI64}},
3300 {"hbc", {AArch64::FeatureHBC}},
3301 {"mops", {AArch64::FeatureMOPS}},
3302 // FIXME: Unsupported extensions
3303 {"lor", {}},
3304 {"rdma", {}},
3305 {"profile", {}},
3306};
3307
3308static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3309 if (FBS[AArch64::HasV8_0aOps])
3310 Str += "ARMv8a";
3311 if (FBS[AArch64::HasV8_1aOps])
3312 Str += "ARMv8.1a";
3313 else if (FBS[AArch64::HasV8_2aOps])
3314 Str += "ARMv8.2a";
3315 else if (FBS[AArch64::HasV8_3aOps])
3316 Str += "ARMv8.3a";
3317 else if (FBS[AArch64::HasV8_4aOps])
3318 Str += "ARMv8.4a";
3319 else if (FBS[AArch64::HasV8_5aOps])
3320 Str += "ARMv8.5a";
3321 else if (FBS[AArch64::HasV8_6aOps])
3322 Str += "ARMv8.6a";
3323 else if (FBS[AArch64::HasV8_7aOps])
3324 Str += "ARMv8.7a";
3325 else if (FBS[AArch64::HasV8_8aOps])
3326 Str += "ARMv8.8a";
3327 else if (FBS[AArch64::HasV9_0aOps])
3328 Str += "ARMv9-a";
3329 else if (FBS[AArch64::HasV9_1aOps])
3330 Str += "ARMv9.1a";
3331 else if (FBS[AArch64::HasV9_2aOps])
3332 Str += "ARMv9.2a";
3333 else if (FBS[AArch64::HasV9_3aOps])
3334 Str += "ARMv9.3a";
3335 else if (FBS[AArch64::HasV8_0rOps])
3336 Str += "ARMv8r";
3337 else {
3338 SmallVector<std::string, 2> ExtMatches;
3339 for (const auto& Ext : ExtensionMap) {
3340 // Use & in case multiple features are enabled
3341 if ((FBS & Ext.Features) != FeatureBitset())
3342 ExtMatches.push_back(Ext.Name);
3343 }
3344 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3345 }
3346}
3347
3348void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3349 SMLoc S) {
3350 const uint16_t Op2 = Encoding & 7;
3351 const uint16_t Cm = (Encoding & 0x78) >> 3;
3352 const uint16_t Cn = (Encoding & 0x780) >> 7;
3353 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3354
3355 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3356
3357 Operands.push_back(
3358 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3359 Operands.push_back(
3360 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3361 Operands.push_back(
3362 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3363 Expr = MCConstantExpr::create(Op2, getContext());
3364 Operands.push_back(
3365 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3366}
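createSysAlias unpacks the 14-bit SYS operand encoding as op1:Cn:Cm:op2 in bits [13:11]:[10:7]:[6:3]:[2:0]. A standalone check of the same masks and shifts on a sample value:

#include <cstdint>
#include <cstdio>

int main() {
  // Same masks/shifts as createSysAlias above; the value is arbitrary.
  uint16_t Encoding = 0x1F82;
  uint16_t Op2 = Encoding & 7;               // bits [2:0]
  uint16_t Cm  = (Encoding & 0x78) >> 3;     // bits [6:3]
  uint16_t Cn  = (Encoding & 0x780) >> 7;    // bits [10:7]
  uint16_t Op1 = (Encoding & 0x3800) >> 11;  // bits [13:11]
  std::printf("op1=%u Cn=%u Cm=%u op2=%u\n", Op1, Cn, Cm, Op2);
}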
3367
3368/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3369/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3370bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3371 OperandVector &Operands) {
3372 if (Name.contains('.'))
3373 return TokError("invalid operand");
3374
3375 Mnemonic = Name;
3376 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3377
3378 const AsmToken &Tok = getTok();
3379 StringRef Op = Tok.getString();
3380 SMLoc S = Tok.getLoc();
3381
3382 if (Mnemonic == "ic") {
3383 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3384 if (!IC)
3385 return TokError("invalid operand for IC instruction");
3386 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3387 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3388 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3389 return TokError(Str);
3390 }
3391 createSysAlias(IC->Encoding, Operands, S);
3392 } else if (Mnemonic == "dc") {
3393 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3394 if (!DC)
3395 return TokError("invalid operand for DC instruction");
3396 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3397 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3398 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3399 return TokError(Str);
3400 }
3401 createSysAlias(DC->Encoding, Operands, S);
3402 } else if (Mnemonic == "at") {
3403 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3404 if (!AT)
3405 return TokError("invalid operand for AT instruction");
3406 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3407 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3408 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3409 return TokError(Str);
3410 }
3411 createSysAlias(AT->Encoding, Operands, S);
3412 } else if (Mnemonic == "tlbi") {
3413 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3414 if (!TLBI)
3415 return TokError("invalid operand for TLBI instruction");
3416 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3417 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3418 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3419 return TokError(Str);
3420 }
3421 createSysAlias(TLBI->Encoding, Operands, S);
3422 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
3423 const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
3424 if (!PRCTX)
3425 return TokError("invalid operand for prediction restriction instruction");
3426 else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
3427 std::string Str(
3428 Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
3429 setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
3430 return TokError(Str);
3431 }
3432 uint16_t PRCTX_Op2 =
3433 Mnemonic == "cfp" ? 4 :
3434 Mnemonic == "dvp" ? 5 :
3435 Mnemonic == "cpp" ? 7 :
3436 0;
3437 assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
3438 createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
3439 }
3440
3441 Lex(); // Eat operand.
3442
3443 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3444 bool HasRegister = false;
3445
3446 // Check for the optional register operand.
3447 if (parseOptionalToken(AsmToken::Comma)) {
3448 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3449 return TokError("expected register operand");
3450 HasRegister = true;
3451 }
3452
3453 if (ExpectRegister && !HasRegister)
3454 return TokError("specified " + Mnemonic + " op requires a register");
3455 else if (!ExpectRegister && HasRegister)
3456 return TokError("specified " + Mnemonic + " op does not use a register");
3457
3458 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3459 return true;
3460
3461 return false;
3462}
3463
3464OperandMatchResultTy
3465AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3466 MCAsmParser &Parser = getParser();
3467 const AsmToken &Tok = getTok();
3468
3469 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3470 TokError("'csync' operand expected");
3471 return MatchOperand_ParseFail;
3472 } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3473 // Immediate operand.
3474 const MCExpr *ImmVal;
3475 SMLoc ExprLoc = getLoc();
3476 AsmToken IntTok = Tok;
3477 if (getParser().parseExpression(ImmVal))
3478 return MatchOperand_ParseFail;
3479 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3480 if (!MCE) {
3481 Error(ExprLoc, "immediate value expected for barrier operand");
3482 return MatchOperand_ParseFail;
3483 }
3484 int64_t Value = MCE->getValue();
3485 if (Mnemonic == "dsb" && Value > 15) {
3486 // This case is a no-match here, but it might be matched by the nXS
3487 // variant. We deliberately do not unlex the optional '#', as it is not
3488 // needed to characterize an integer immediate.
3489 Parser.getLexer().UnLex(IntTok);
3490 return MatchOperand_NoMatch;
3491 }
3492 if (Value < 0 || Value > 15) {
3493 Error(ExprLoc, "barrier operand out of range");
3494 return MatchOperand_ParseFail;
3495 }
3496 auto DB = AArch64DB::lookupDBByEncoding(Value);
3497 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3498 ExprLoc, getContext(),
3499 false /*hasnXSModifier*/));
3500 return MatchOperand_Success;
3501 }
3502
3503 if (Tok.isNot(AsmToken::Identifier)) {
3504 TokError("invalid operand for instruction");
3505 return MatchOperand_ParseFail;
3506 }
3507
3508 StringRef Operand = Tok.getString();
3509 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3510 auto DB = AArch64DB::lookupDBByName(Operand);
3511 // The only valid named option for ISB is 'sy'
3512 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3513 TokError("'sy' or #imm operand expected");
3514 return MatchOperand_ParseFail;
3515 // The only valid named option for TSB is 'csync'
3516 } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3517 TokError("'csync' operand expected");
3518 return MatchOperand_ParseFail;
3519 } else if (!DB && !TSB) {
3520 if (Mnemonic == "dsb") {
3521 // This case is a no match here, but it might be matched by the nXS
3522 // variant.
3523 return MatchOperand_NoMatch;
3524 }
3525 TokError("invalid barrier option name");
3526 return MatchOperand_ParseFail;
3527 }
3528
3529 Operands.push_back(AArch64Operand::CreateBarrier(
3530 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3531 getContext(), false /*hasnXSModifier*/));
3532 Lex(); // Consume the option
3533
3534 return MatchOperand_Success;
3535}
3536
3537OperandMatchResultTy
3538AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3539 const AsmToken &Tok = getTok();
3540
3541 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3542 if (Mnemonic != "dsb")
3543 return MatchOperand_ParseFail;
3544
3545 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3546 // Immediate operand.
3547 const MCExpr *ImmVal;
3548 SMLoc ExprLoc = getLoc();
3549 if (getParser().parseExpression(ImmVal))
3550 return MatchOperand_ParseFail;
3551 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3552 if (!MCE) {
3553 Error(ExprLoc, "immediate value expected for barrier operand");
3554 return MatchOperand_ParseFail;
3555 }
3556 int64_t Value = MCE->getValue();
3557 // v8.7-A DSB in the nXS variant accepts only the following immediate
3558 // values: 16, 20, 24, 28.
3559 if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3560 Error(ExprLoc, "barrier operand out of range");
3561 return MatchOperand_ParseFail;
3562 }
3563 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3564 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3565 ExprLoc, getContext(),
3566 true /*hasnXSModifier*/));
3567 return MatchOperand_Success;
3568 }
3569
3570 if (Tok.isNot(AsmToken::Identifier)) {
3571 TokError("invalid operand for instruction");
3572 return MatchOperand_ParseFail;
3573 }
3574
3575 StringRef Operand = Tok.getString();
3576 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3577
3578 if (!DB) {
3579 TokError("invalid barrier option name");
3580 return MatchOperand_ParseFail;
3581 }
3582
3583 Operands.push_back(
3584 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3585 getContext(), true /*hasnXSModifier*/));
3586 Lex(); // Consume the option
3587
3588 return MatchOperand_Success;
3589}
3590
3591OperandMatchResultTy
3592AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3593 const AsmToken &Tok = getTok();
3594
3595 if (Tok.isNot(AsmToken::Identifier))
3596 return MatchOperand_NoMatch;
3597
3598 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3599 return MatchOperand_NoMatch;
3600
3601 int MRSReg, MSRReg;
3602 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3603 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3604 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3605 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3606 } else
3607 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3608
3609 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3610 unsigned PStateImm = -1;
3611 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3612 PStateImm = PState->Encoding;
3613
3614 Operands.push_back(
3615 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3616 PStateImm, getContext()));
3617 Lex(); // Eat identifier
3618
3619 return MatchOperand_Success;
3620}
3621
3622/// tryParseNeonVectorRegister - Parse a vector register operand.
3623bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3624 if (getTok().isNot(AsmToken::Identifier))
3625 return true;
3626
3627 SMLoc S = getLoc();
3628 // Check for a vector register specifier first.
3629 StringRef Kind;
3630 unsigned Reg;
3631 OperandMatchResultTy Res =
3632 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3633 if (Res != MatchOperand_Success)
3634 return true;
3635
3636 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3637 if (!KindRes)
3638 return true;
3639
3640 unsigned ElementWidth = KindRes->second;
3641 Operands.push_back(
3642 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3643 S, getLoc(), getContext()));
3644
3645 // If there was an explicit qualifier, that goes on as a literal text
3646 // operand.
3647 if (!Kind.empty())
3648 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
3649
3650 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3651}
3652
3653OperandMatchResultTy
3654AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3655 SMLoc SIdx = getLoc();
3656 if (parseOptionalToken(AsmToken::LBrac)) {
3657 const MCExpr *ImmVal;
3658 if (getParser().parseExpression(ImmVal))
3659 return MatchOperand_NoMatch;
3660 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3661 if (!MCE) {
3662 TokError("immediate value expected for vector index");
3663 return MatchOperand_ParseFail;
3664 }
3665
3666 SMLoc E = getLoc();
3667
3668 if (parseToken(AsmToken::RBrac, "']' expected"))
3669 return MatchOperand_ParseFail;
3670
3671 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3672 E, getContext()));
3673 return MatchOperand_Success;
3674 }
3675
3676 return MatchOperand_NoMatch;
3677}
3678
3679// tryParseVectorRegister - Try to parse a vector register name with
3680// optional kind specifier. If it is a register specifier, eat the token
3681// and return it.
3682OperandMatchResultTy
3683AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3684 RegKind MatchKind) {
3685 const AsmToken &Tok = getTok();
3686
3687 if (Tok.isNot(AsmToken::Identifier))
3688 return MatchOperand_NoMatch;
3689
3690 StringRef Name = Tok.getString();
3691 // If there is a kind specifier, it's separated from the register name by
3692 // a '.'.
3693 size_t Start = 0, Next = Name.find('.');
3694 StringRef Head = Name.slice(Start, Next);
3695 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3696
3697 if (RegNum) {
3698 if (Next != StringRef::npos) {
3699 Kind = Name.slice(Next, StringRef::npos);
3700 if (!isValidVectorKind(Kind, MatchKind)) {
3701 TokError("invalid vector kind qualifier");
3702 return MatchOperand_ParseFail;
3703 }
3704 }
3705 Lex(); // Eat the register token.
3706
3707 Reg = RegNum;
3708 return MatchOperand_Success;
3709 }
3710
3711 return MatchOperand_NoMatch;
3712}
3713
3714/// tryParseSVEPredicateVector - Parse an SVE predicate register operand.
3715OperandMatchResultTy
3716AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3717 // Check for a SVE predicate register specifier first.
3718 const SMLoc S = getLoc();
3719 StringRef Kind;
3720 unsigned RegNum;
3721 auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3722 if (Res != MatchOperand_Success)
3723 return Res;
3724
3725 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3726 if (!KindRes)
3727 return MatchOperand_NoMatch;
3728
3729 unsigned ElementWidth = KindRes->second;
3730 Operands.push_back(AArch64Operand::CreateVectorReg(
3731 RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3732 getLoc(), getContext()));
3733
3734 if (getLexer().is(AsmToken::LBrac)) {
3735 // Indexed predicate, there's no comma so try parse the next operand
3736 // immediately.
3737 if (parseOperand(Operands, false, false))
3738 return MatchOperand_NoMatch;
3739 }
3740
3741 // Not all predicates are followed by a '/m' or '/z'.
3742 if (getTok().isNot(AsmToken::Slash))
3743 return MatchOperand_Success;
3744
3745 // But when they do they shouldn't have an element type suffix.
3746 if (!Kind.empty()) {
3747 Error(S, "not expecting size suffix");
3748 return MatchOperand_ParseFail;
3749 }
3750
3751 // Add a literal slash as operand
3752 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
3753
3754 Lex(); // Eat the slash.
3755
3756 // Zeroing or merging?
3757 auto Pred = getTok().getString().lower();
3758 if (Pred != "z" && Pred != "m") {
3759 Error(getLoc(), "expecting 'm' or 'z' predication");
3760 return MatchOperand_ParseFail;
3761 }
3762
3763 // Add zero/merge token.
3764 const char *ZM = Pred == "z" ? "z" : "m";
3765 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
3766
3767 Lex(); // Eat zero/merge token.
3768 return MatchOperand_Success;
3769}
3770
3771/// parseRegister - Parse a register operand.
3772bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3773 // Try for a Neon vector register.
3774 if (!tryParseNeonVectorRegister(Operands))
3775 return false;
3776
3777 // Otherwise try for a scalar register.
3778 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3779 return false;
3780
3781 return true;
3782}
3783
3784bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3785 bool HasELFModifier = false;
3786 AArch64MCExpr::VariantKind RefKind;
3787
3788 if (parseOptionalToken(AsmToken::Colon)) {
3789 HasELFModifier = true;
3790
3791 if (getTok().isNot(AsmToken::Identifier))
3792 return TokError("expect relocation specifier in operand after ':'");
3793
3794 std::string LowerCase = getTok().getIdentifier().lower();
3795 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3796 .Case("lo12", AArch64MCExpr::VK_LO12)
3797 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3798 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3799 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3800 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3801 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3802 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3803 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3804 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3805 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3806 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3807 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3808 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3809 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3810 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3811 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3812 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3813 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3814 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3815 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3816 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3817 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3818 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3819 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3820 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3821 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3822 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3823 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3824 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3825 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3826 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3827 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3828 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3829 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3830 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3831 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3832 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3833 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3834 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3835 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3836 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3837 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3838 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3839 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3840 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3841 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3842 .Default(AArch64MCExpr::VK_INVALID);
3843
3844 if (RefKind == AArch64MCExpr::VK_INVALID)
3845 return TokError("expect relocation specifier in operand after ':'");
3846
3847 Lex(); // Eat identifier
3848
3849 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3850 return true;
3851 }
3852
3853 if (getParser().parseExpression(ImmVal))
3854 return true;
3855
3856 if (HasELFModifier)
3857 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3858
3859 return false;
3860}
3861
3862OperandMatchResultTy
3863AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
3864 if (getTok().isNot(AsmToken::LCurly))
3865 return MatchOperand_NoMatch;
3866
3867 auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
3868 StringRef Name = getTok().getString();
3869 size_t DotPosition = Name.find('.');
3870 if (DotPosition == StringRef::npos)
3871 return MatchOperand_NoMatch;
3872
3873 unsigned RegNum = matchMatrixTileListRegName(Name);
3874 if (!RegNum)
3875 return MatchOperand_NoMatch;
3876
3877 StringRef Tail = Name.drop_front(DotPosition);
3878 const Optional<std::pair<int, int>> &KindRes =
3879 parseVectorKind(Tail, RegKind::Matrix);
3880 if (!KindRes) {
3881 TokError("Expected the register to be followed by element width suffix");
3882 return MatchOperand_ParseFail;
3883 }
3884 ElementWidth = KindRes->second;
3885 Reg = RegNum;
3886 Lex(); // Eat the register.
3887 return MatchOperand_Success;
3888 };
3889
3890 SMLoc S = getLoc();
3891 auto LCurly = getTok();
3892 Lex(); // Eat left bracket token.
3893
3894 // Empty matrix list
3895 if (parseOptionalToken(AsmToken::RCurly)) {
3896 Operands.push_back(AArch64Operand::CreateMatrixTileList(
3897 /*RegMask=*/0, S, getLoc(), getContext()));
3898 return MatchOperand_Success;
3899 }
3900
3901 // Try parse {za} alias early
3902 if (getTok().getString().equals_insensitive("za")) {
3903 Lex(); // Eat 'za'
3904
3905 if (parseToken(AsmToken::RCurly, "'}' expected"))
3906 return MatchOperand_ParseFail;
3907
3908 Operands.push_back(AArch64Operand::CreateMatrixTileList(
3909 /*RegMask=*/0xFF, S, getLoc(), getContext()));
3910 return MatchOperand_Success;
3911 }
3912
3913 SMLoc TileLoc = getLoc();
3914
3915 unsigned FirstReg, ElementWidth;
3916 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
3917 if (ParseRes != MatchOperand_Success) {
3918 getLexer().UnLex(LCurly);
3919 return ParseRes;
3920 }
3921
3922 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3923
3924 unsigned PrevReg = FirstReg;
3925
3926 SmallSet<unsigned, 8> DRegs;
3927 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
3928
3929 SmallSet<unsigned, 8> SeenRegs;
3930 SeenRegs.insert(FirstReg);
3931
3932 while (parseOptionalToken(AsmToken::Comma)) {
3933 TileLoc = getLoc();
3934 unsigned Reg, NextElementWidth;
3935 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
3936 if (ParseRes != MatchOperand_Success)
3937 return ParseRes;
3938
3939 // Element size must match on all regs in the list.
3940 if (ElementWidth != NextElementWidth) {
3941 Error(TileLoc, "mismatched register size suffix");
3942 return MatchOperand_ParseFail;
3943 }
3944
3945 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
3946 Warning(TileLoc, "tile list not in ascending order");
3947
3948 if (SeenRegs.contains(Reg))
3949 Warning(TileLoc, "duplicate tile in list");
3950 else {
3951 SeenRegs.insert(Reg);
3952 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
3953 }
3954
3955 PrevReg = Reg;
3956 }
3957
3958 if (parseToken(AsmToken::RCurly, "'}' expected"))
3959 return MatchOperand_ParseFail;
3960
3961 unsigned RegMask = 0;
3962 for (auto Reg : DRegs)
3963 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
3964 RI->getEncodingValue(AArch64::ZAD0));
3965 Operands.push_back(
3966 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
3967
3968 return MatchOperand_Success;
3969}
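The tile list ends up as an 8-bit mask with one bit per 64-bit tile (za0.d through za7.d); ComputeRegsForAlias expands wider tiles to the .d tiles they overlap before the mask is built. A standalone sketch of the mask construction over plain tile indices (helper name illustrative):

#include <cstdint>
#include <initializer_list>

// Builds the 8-bit ZAD mask used for matrix tile lists: bit i <=> tile zaI.d.
uint8_t buildTileMask(std::initializer_list<unsigned> TileIndices) {
  uint8_t Mask = 0;
  for (unsigned Idx : TileIndices)
    if (Idx < 8)  // za0.d .. za7.d
      Mask |= static_cast<uint8_t>(1u << Idx);
  return Mask;
}
// e.g. {za0.d, za2.d} -> buildTileMask({0, 2}) == 0b00000101; the '{za}'
// alias above corresponds to the full mask 0xFF.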
3970
3971template <RegKind VectorKind>
3972OperandMatchResultTy
3973AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3974 bool ExpectMatch) {
3975 MCAsmParser &Parser = getParser();
3976 if (!getTok().is(AsmToken::LCurly))
3977 return MatchOperand_NoMatch;
3978
3979 // Wrapper around parse function
3980 auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3981 bool NoMatchIsError) {
3982 auto RegTok = getTok();
3983 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3984 if (ParseRes == MatchOperand_Success) {
3985 if (parseVectorKind(Kind, VectorKind))
3986 return ParseRes;
3987 llvm_unreachable("Expected a valid vector kind");
3988 }
3989
3990 if (RegTok.isNot(AsmToken::Identifier) ||
3991 ParseRes == MatchOperand_ParseFail ||
3992 (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
3993 !RegTok.getString().startswith_insensitive("za"))) {
3994 Error(Loc, "vector register expected");
3995 return MatchOperand_ParseFail;
3996 }
3997
3998 return MatchOperand_NoMatch;
3999 };
4000
4001 SMLoc S = getLoc();
4002 auto LCurly = getTok();
4003 Lex(); // Eat left bracket token.
4004
4005 StringRef Kind;
4006 unsigned FirstReg;
4007 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4008
4009 // Put back the original left bracket if there was no match, so that
4010 // different types of list-operands can be matched (e.g. SVE, Neon).
4011 if (ParseRes == MatchOperand_NoMatch)
4012 Parser.getLexer().UnLex(LCurly);
4013
4014 if (ParseRes != MatchOperand_Success)
4015 return ParseRes;
4016
4017 int64_t PrevReg = FirstReg;
4018 unsigned Count = 1;
4019
4020 if (parseOptionalToken(AsmToken::Minus)) {
4021 SMLoc Loc = getLoc();
4022 StringRef NextKind;
4023
4024 unsigned Reg;
4025 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4026 if (ParseRes != MatchOperand_Success)
4027 return ParseRes;
4028
4029     // The kind suffix must match on all regs in the list.
4030 if (Kind != NextKind) {
4031 Error(Loc, "mismatched register size suffix");
4032 return MatchOperand_ParseFail;
4033 }
4034
4035 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
4036
4037 if (Space == 0 || Space > 3) {
4038 Error(Loc, "invalid number of vectors");
4039 return MatchOperand_ParseFail;
4040 }
4041
4042 Count += Space;
4043 }
4044 else {
4045 while (parseOptionalToken(AsmToken::Comma)) {
4046 SMLoc Loc = getLoc();
4047 StringRef NextKind;
4048 unsigned Reg;
4049 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4050 if (ParseRes != MatchOperand_Success)
4051 return ParseRes;
4052
4053       // The kind suffix must match on all regs in the list.
4054 if (Kind != NextKind) {
4055 Error(Loc, "mismatched register size suffix");
4056 return MatchOperand_ParseFail;
4057 }
4058
4059       // Registers must be sequential (with wraparound at 31)
4060 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
4061 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
4062 Error(Loc, "registers must be sequential");
4063 return MatchOperand_ParseFail;
4064 }
4065
4066 PrevReg = Reg;
4067 ++Count;
4068 }
4069 }
4070
4071 if (parseToken(AsmToken::RCurly, "'}' expected"))
4072 return MatchOperand_ParseFail;
4073
4074 if (Count > 4) {
4075 Error(S, "invalid number of vectors");
4076 return MatchOperand_ParseFail;
4077 }
4078
4079 unsigned NumElements = 0;
4080 unsigned ElementWidth = 0;
4081 if (!Kind.empty()) {
4082 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4083 std::tie(NumElements, ElementWidth) = *VK;
4084 }
4085
4086 Operands.push_back(AArch64Operand::CreateVectorList(
4087 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
4088 getContext()));
4089
4090 return MatchOperand_Success;
4091}
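
The range form handled above allows wraparound at register 31, so the distance between the two list endpoints is computed modulo 32. A standalone sketch of that arithmetic, assuming vector register encodings 0..31:

    #include <cassert>

    // Distance from PrevReg to Reg in a 32-register file with wraparound,
    // mirroring: Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg)
    static unsigned listSpace(unsigned PrevReg, unsigned Reg) {
      return (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
    }

    int main() {
      assert(listSpace(0, 3) == 3);   // "{ v0.4s - v3.4s }" -> 4 registers
      assert(listSpace(30, 1) == 3);  // "{ v30.4s - v1.4s }" wraps: v30,v31,v0,v1
      assert(listSpace(5, 5) == 32);  // same register twice: Space > 3, rejected
      return 0;
    }
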
4092
4093/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4094bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4095 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4096 if (ParseRes != MatchOperand_Success)
4097 return true;
4098
4099 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4100}
4101
4102OperandMatchResultTy
4103AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4104 SMLoc StartLoc = getLoc();
4105
4106 unsigned RegNum;
4107 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4108 if (Res != MatchOperand_Success)
4109 return Res;
4110
4111 if (!parseOptionalToken(AsmToken::Comma)) {
4112 Operands.push_back(AArch64Operand::CreateReg(
4113 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4114 return MatchOperand_Success;
4115 }
4116
4117 parseOptionalToken(AsmToken::Hash);
4118
4119 if (getTok().isNot(AsmToken::Integer)) {
4120 Error(getLoc(), "index must be absent or #0");
4121 return MatchOperand_ParseFail;
4122 }
4123
4124 const MCExpr *ImmVal;
4125 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4126 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4127 Error(getLoc(), "index must be absent or #0");
4128 return MatchOperand_ParseFail;
4129 }
4130
4131 Operands.push_back(AArch64Operand::CreateReg(
4132 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4133 return MatchOperand_Success;
4134}
4135
4136template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4137OperandMatchResultTy
4138AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4139 SMLoc StartLoc = getLoc();
4140
4141 unsigned RegNum;
4142 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4143 if (Res != MatchOperand_Success)
4144 return Res;
4145
4146 // No shift/extend is the default.
4147 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4148 Operands.push_back(AArch64Operand::CreateReg(
4149 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4150 return MatchOperand_Success;
4151 }
4152
4153 // Eat the comma
4154 Lex();
4155
4156 // Match the shift
4157 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4158 Res = tryParseOptionalShiftExtend(ExtOpnd);
4159 if (Res != MatchOperand_Success)
4160 return Res;
4161
4162 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4163 Operands.push_back(AArch64Operand::CreateReg(
4164 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4165 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4166 Ext->hasShiftExtendAmount()));
4167
4168 return MatchOperand_Success;
4169}
4170
4171bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4172 MCAsmParser &Parser = getParser();
4173
4174 // Some SVE instructions have a decoration after the immediate, i.e.
4175 // "mul vl". We parse them here and add tokens, which must be present in the
4176 // asm string in the tablegen instruction.
4177 bool NextIsVL =
4178 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4179 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4180 if (!getTok().getString().equals_insensitive("mul") ||
4181 !(NextIsVL || NextIsHash))
4182 return true;
4183
4184 Operands.push_back(
4185 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4186 Lex(); // Eat the "mul"
4187
4188 if (NextIsVL) {
4189 Operands.push_back(
4190 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4191 Lex(); // Eat the "vl"
4192 return false;
4193 }
4194
4195 if (NextIsHash) {
4196 Lex(); // Eat the #
4197 SMLoc S = getLoc();
4198
4199 // Parse immediate operand.
4200 const MCExpr *ImmVal;
4201 if (!Parser.parseExpression(ImmVal))
4202 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4203 Operands.push_back(AArch64Operand::CreateImm(
4204 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4205 getContext()));
4206         return false;
4207 }
4208 }
4209
4210 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4211}
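
The dispatch above hinges on a single token of lookahead: after "mul", a following "vl" keyword selects the vector-length decoration, while '#' introduces an immediate multiplier. A reduced sketch of the same classification, using std::string in place of the MC lexer:

    #include <algorithm>
    #include <cassert>
    #include <cctype>
    #include <string>

    enum class MulSuffix { VL, Imm, None };

    // Classify the token peeked after "mul", mirroring the NextIsVL /
    // NextIsHash checks above (the parser compares case-insensitively).
    static MulSuffix classifyMulSuffix(std::string Next) {
      std::transform(Next.begin(), Next.end(), Next.begin(),
                     [](unsigned char C) { return std::tolower(C); });
      if (Next == "vl")
        return MulSuffix::VL;
      if (!Next.empty() && Next[0] == '#')
        return MulSuffix::Imm;
      return MulSuffix::None;
    }

    int main() {
      assert(classifyMulSuffix("VL") == MulSuffix::VL);   // "mul vl"
      assert(classifyMulSuffix("#4") == MulSuffix::Imm);  // "mul #4"
      assert(classifyMulSuffix("x0") == MulSuffix::None); // not a mul decoration
      return 0;
    }
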
4212
4213bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4214 auto Tok = getTok();
4215 if (Tok.isNot(AsmToken::Identifier))
4216 return true;
4217
4218 auto Keyword = Tok.getString();
4219 Keyword = StringSwitch<StringRef>(Keyword.lower())
4220 .Case("sm", "sm")
4221 .Case("za", "za")
4222 .Default(Keyword);
4223 Operands.push_back(
4224 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4225
4226 Lex();
4227 return false;
4228}
4229
4230 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4231/// operand regardless of the mnemonic.
4232bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4233 bool invertCondCode) {
4234 MCAsmParser &Parser = getParser();
4235
4236 OperandMatchResultTy ResTy =
4237 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
4238
4239 // Check if the current operand has a custom associated parser, if so, try to
4240 // custom parse the operand, or fallback to the general approach.
4241 if (ResTy == MatchOperand_Success)
4242 return false;
4243 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4244 // there was a match, but an error occurred, in which case, just return that
4245 // the operand parsing failed.
4246 if (ResTy == MatchOperand_ParseFail)
4247 return true;
4248
4249 // Nothing custom, so do general case parsing.
4250 SMLoc S, E;
4251 switch (getLexer().getKind()) {
4252 default: {
4253 SMLoc S = getLoc();
4254 const MCExpr *Expr;
4255 if (parseSymbolicImmVal(Expr))
4256 return Error(S, "invalid operand");
4257
4258 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4259 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4260 return false;
4261 }
4262 case AsmToken::LBrac: {
4263 Operands.push_back(
4264 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4265 Lex(); // Eat '['
4266
4267 // There's no comma after a '[', so we can parse the next operand
4268 // immediately.
4269 return parseOperand(Operands, false, false);
4270 }
4271 case AsmToken::LCurly: {
4272 if (!parseNeonVectorList(Operands))
4273 return false;
4274
4275 Operands.push_back(
4276 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4277 Lex(); // Eat '{'
4278
4279 // There's no comma after a '{', so we can parse the next operand
4280 // immediately.
4281 return parseOperand(Operands, false, false);
4282 }
4283 case AsmToken::Identifier: {
4284 // If we're expecting a Condition Code operand, then just parse that.
4285 if (isCondCode)
4286 return parseCondCode(Operands, invertCondCode);
4287
4288 // If it's a register name, parse it.
4289 if (!parseRegister(Operands))
4290 return false;
4291
4292 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4293 // by SVE instructions.
4294 if (!parseOptionalMulOperand(Operands))
4295 return false;
4296
4297 // If this is an "smstart" or "smstop" instruction, parse its special
4298 // keyword operand as an identifier.
4299 if (Mnemonic == "smstart" || Mnemonic == "smstop")
4300 return parseKeywordOperand(Operands);
4301
4302 // This could be an optional "shift" or "extend" operand.
4303 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
4304 // We can only continue if no tokens were eaten.
4305 if (GotShift != MatchOperand_NoMatch)
4306 return GotShift;
4307
4308 // If this is a two-word mnemonic, parse its special keyword
4309 // operand as an identifier.
4310 if (Mnemonic == "brb")
4311 return parseKeywordOperand(Operands);
4312
4313 // This was not a register so parse other operands that start with an
4314 // identifier (like labels) as expressions and create them as immediates.
4315 const MCExpr *IdVal;
4316 S = getLoc();
4317 if (getParser().parseExpression(IdVal))
4318 return true;
4319 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4320 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4321 return false;
4322 }
4323 case AsmToken::Integer:
4324 case AsmToken::Real:
4325 case AsmToken::Hash: {
4326 // #42 -> immediate.
4327 S = getLoc();
4328
4329 parseOptionalToken(AsmToken::Hash);
4330
4331 // Parse a negative sign
4332 bool isNegative = false;
4333 if (getTok().is(AsmToken::Minus)) {
4334 isNegative = true;
4335 // We need to consume this token only when we have a Real, otherwise
4336 // we let parseSymbolicImmVal take care of it
4337 if (Parser.getLexer().peekTok().is(AsmToken::Real))
4338 Lex();
4339 }
4340
4341 // The only Real that should come through here is a literal #0.0 for
4342 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4343 // so convert the value.
4344 const AsmToken &Tok = getTok();
4345 if (Tok.is(AsmToken::Real)) {
4346 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4347 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4348 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4349 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4350 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4351 return TokError("unexpected floating point literal");
4352 else if (IntVal != 0 || isNegative)
4353 return TokError("expected floating-point constant #0.0");
4354 Lex(); // Eat the token.
4355
4356 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4357 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4358 return false;
4359 }
4360
4361 const MCExpr *ImmVal;
4362 if (parseSymbolicImmVal(ImmVal))
4363 return true;
4364
4365 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4366 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4367 return false;
4368 }
4369 case AsmToken::Equal: {
4370 SMLoc Loc = getLoc();
4371 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4372 return TokError("unexpected token in operand");
4373 Lex(); // Eat '='
4374 const MCExpr *SubExprVal;
4375 if (getParser().parseExpression(SubExprVal))
4376 return true;
4377
4378 if (Operands.size() < 2 ||
4379 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4380 return Error(Loc, "Only valid when first operand is register");
4381
4382 bool IsXReg =
4383 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4384 Operands[1]->getReg());
4385
4386 MCContext& Ctx = getContext();
4387 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4388 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
4389 if (isa<MCConstantExpr>(SubExprVal)) {
4390 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4391 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4392 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
4393 ShiftAmt += 16;
4394 Imm >>= 16;
4395 }
4396 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4397 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4398 Operands.push_back(AArch64Operand::CreateImm(
4399 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4400 if (ShiftAmt)
4401 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4402 ShiftAmt, true, S, E, Ctx));
4403 return false;
4404 }
4405 APInt Simm = APInt(64, Imm << ShiftAmt);
4406 // check if the immediate is an unsigned or signed 32-bit int for W regs
4407 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4408 return Error(Loc, "Immediate too large for register");
4409 }
4410 // If it is a label or an imm that cannot fit in a movz, put it into CP.
4411 const MCExpr *CPLoc =
4412 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4413 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4414 return false;
4415 }
4416 }
4417}
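
The '=' branch above rewrites "ldr Xt, =<imm>" as a movz whenever the constant is a single 16-bit chunk at a 16-bit-aligned position; anything else goes to the constant pool. A standalone sketch of that fit test (for Imm > 0xFFFF, countTrailingZeros(Imm) >= 16 is the same condition as the low 16 bits being clear):

    #include <cassert>
    #include <cstdint>

    // Can Imm be emitted as "movz Rd, #Imm16, lsl #ShiftAmt"? Mirrors the
    // shift-walk above; returns false when the constant does not fit.
    static bool fitsMovz(uint64_t Imm, bool IsXReg, uint32_t &Imm16,
                         uint32_t &ShiftAmt) {
      ShiftAmt = 0;
      const uint32_t MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && (Imm & 0xFFFF) == 0) { // low 16 bits clear
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt > MaxShiftAmt || Imm > 0xFFFF)
        return false;
      Imm16 = static_cast<uint32_t>(Imm);
      return true;
    }

    int main() {
      uint32_t Imm16, Shift;
      // "ldr x0, =0x10000" can become "movz x0, #1, lsl #16".
      assert(fitsMovz(0x10000, /*IsXReg=*/true, Imm16, Shift) &&
             Imm16 == 1 && Shift == 16);
      // Scattered bits don't fit a single movz: the parser uses the pool.
      assert(!fitsMovz(0x123456789ULL, /*IsXReg=*/true, Imm16, Shift));
      return 0;
    }
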
4418
4419bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4420 const MCExpr *Expr = nullptr;
4421 SMLoc L = getLoc();
4422 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4423 return true;
4424 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4425 if (check(!Value, L, "expected constant expression"))
4426 return true;
4427 Out = Value->getValue();
4428 return false;
4429}
4430
4431bool AArch64AsmParser::parseComma() {
4432 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4433 return true;
4434 // Eat the comma
4435 Lex();
4436 return false;
4437}
4438
4439bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4440 unsigned First, unsigned Last) {
4441 unsigned Reg;
  [25] 'Reg' declared without an initial value
4442   SMLoc Start, End;
4443   if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
  [26] Calling 'AArch64AsmParser::ParseRegister'
  [35] Returning from 'AArch64AsmParser::ParseRegister'
  [36] Assuming the condition is false
  [37] Taking false branch
4444 return true;
4445
4446 // Special handling for FP and LR; they aren't linearly after x28 in
4447 // the registers enum.
4448 unsigned RangeEnd = Last;
4449   if (Base == AArch64::X0) {
  [37.1] 'Base' is equal to X0
  [38] Taking true branch
4450     if (Last == AArch64::FP) {
  [38.1] 'Last' is equal to FP
  [39] Taking true branch
4451       RangeEnd = AArch64::X28;
4452       if (Reg == AArch64::FP) {
  [40] The left operand of '==' is a garbage value
4453 Out = 29;
4454 return false;
4455 }
4456 }
4457 if (Last == AArch64::LR) {
4458 RangeEnd = AArch64::X28;
4459 if (Reg == AArch64::FP) {
4460 Out = 29;
4461 return false;
4462 } else if (Reg == AArch64::LR) {
4463 Out = 30;
4464 return false;
4465 }
4466 }
4467 }
4468
4469 if (check(Reg < First || Reg > RangeEnd, Start,
4470 Twine("expected register in range ") +
4471 AArch64InstPrinter::getRegisterName(First) + " to " +
4472 AArch64InstPrinter::getRegisterName(Last)))
4473 return true;
4474 Out = Reg - Base;
4475 return false;
4476}
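
This is the path behind the garbage-value warning at line 4452: check() only inspects ParseRegister's return value, so if ParseRegister can report success without ever writing Reg (steps 26-35 above), the FP comparison reads an indeterminate value. A standalone sketch of the failure mode and the obvious hardening, assuming an initializer of 0 is an acceptable "no register" sentinel (AArch64::NoRegister is 0 in the MC layer):

    #include <cassert>

    // Stand-in for a ParseRegister that may report success (false) without
    // writing its output parameter -- the situation the analyzer flags.
    static bool parseMaybe(unsigned &Out, bool WritesOutput) {
      if (WritesOutput)
        Out = 29;   // success, Out assigned
      return false; // success either way; Out may be untouched
    }

    static bool useRegister(bool WritesOutput) {
      unsigned Reg = 0; // the hardening: without this initializer, the
                        // comparison below reads garbage on one path
      if (parseMaybe(Reg, WritesOutput))
        return false;
      return Reg == 29; // well-defined thanks to the initializer
    }

    int main() {
      assert(useRegister(true));
      assert(!useRegister(false));
      return 0;
    }
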
4477
4478bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4479 const MCParsedAsmOperand &Op2) const {
4480 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4481 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4482 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4483 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4484 return MCTargetAsmParser::regsEqual(Op1, Op2);
4485
4486   assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4487          "Testing equality of non-scalar registers not supported");
4488
4489   // Check if the registers match their sub/super register classes.
4490 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4491 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4492 if (AOp1.getRegEqualityTy() == EqualsSubReg)
4493 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4494 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4495 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4496 if (AOp2.getRegEqualityTy() == EqualsSubReg)
4497 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4498
4499 return false;
4500}
4501
4502/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
4503/// operands.
4504bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
4505 StringRef Name, SMLoc NameLoc,
4506 OperandVector &Operands) {
4507 Name = StringSwitch<StringRef>(Name.lower())
4508 .Case("beq", "b.eq")
4509 .Case("bne", "b.ne")
4510 .Case("bhs", "b.hs")
4511 .Case("bcs", "b.cs")
4512 .Case("blo", "b.lo")
4513 .Case("bcc", "b.cc")
4514 .Case("bmi", "b.mi")
4515 .Case("bpl", "b.pl")
4516 .Case("bvs", "b.vs")
4517 .Case("bvc", "b.vc")
4518 .Case("bhi", "b.hi")
4519 .Case("bls", "b.ls")
4520 .Case("bge", "b.ge")
4521 .Case("blt", "b.lt")
4522 .Case("bgt", "b.gt")
4523 .Case("ble", "b.le")
4524 .Case("bal", "b.al")
4525 .Case("bnv", "b.nv")
4526 .Default(Name);
4527
4528 // First check for the AArch64-specific .req directive.
4529 if (getTok().is(AsmToken::Identifier) &&
4530 getTok().getIdentifier().lower() == ".req") {
4531 parseDirectiveReq(Name, NameLoc);
4532 // We always return 'error' for this, as we're done with this
4533     // statement and don't need to match the 'instruction'.
4534 return true;
4535 }
4536
4537 // Create the leading tokens for the mnemonic, split by '.' characters.
4538 size_t Start = 0, Next = Name.find('.');
4539 StringRef Head = Name.slice(Start, Next);
4540
4541 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4542 // the SYS instruction.
4543 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4544 Head == "cfp" || Head == "dvp" || Head == "cpp")
4545 return parseSysAlias(Head, NameLoc, Operands);
4546
4547 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
4548 Mnemonic = Head;
4549
4550 // Handle condition codes for a branch mnemonic
4551 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
4552 Start = Next;
4553 Next = Name.find('.', Start + 1);
4554 Head = Name.slice(Start + 1, Next);
4555
4556 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4557 (Head.data() - Name.data()));
4558 std::string Suggestion;
4559 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
4560 if (CC == AArch64CC::Invalid) {
4561 std::string Msg = "invalid condition code";
4562 if (!Suggestion.empty())
4563 Msg += ", did you mean " + Suggestion + "?";
4564 return Error(SuffixLoc, Msg);
4565 }
4566 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
4567 /*IsSuffix=*/true));
4568 Operands.push_back(
4569 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4570 }
4571
4572 // Add the remaining tokens in the mnemonic.
4573 while (Next != StringRef::npos) {
4574 Start = Next;
4575 Next = Name.find('.', Start + 1);
4576 Head = Name.slice(Start, Next);
4577 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4578 (Head.data() - Name.data()) + 1);
4579 Operands.push_back(AArch64Operand::CreateToken(
4580 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
4581 }
4582
4583 // Conditional compare instructions have a Condition Code operand, which needs
4584 // to be parsed and an immediate operand created.
4585 bool condCodeFourthOperand =
4586 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4587 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4588 Head == "csinc" || Head == "csinv" || Head == "csneg");
4589
4590 // These instructions are aliases to some of the conditional select
4591 // instructions. However, the condition code is inverted in the aliased
4592 // instruction.
4593 //
4594 // FIXME: Is this the correct way to handle these? Or should the parser
4595 // generate the aliased instructions directly?
4596 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4597 bool condCodeThirdOperand =
4598 (Head == "cinc" || Head == "cinv" || Head == "cneg");
4599
4600 // Read the remaining operands.
4601 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4602
4603 unsigned N = 1;
4604 do {
4605 // Parse and remember the operand.
4606 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4607 (N == 3 && condCodeThirdOperand) ||
4608 (N == 2 && condCodeSecondOperand),
4609 condCodeSecondOperand || condCodeThirdOperand)) {
4610 return true;
4611 }
4612
4613 // After successfully parsing some operands there are three special cases
4614 // to consider (i.e. notional operands not separated by commas). Two are
4615 // due to memory specifiers:
4616 // + An RBrac will end an address for load/store/prefetch
4617 // + An '!' will indicate a pre-indexed operation.
4618 //
4619 // And a further case is '}', which ends a group of tokens specifying the
4620 // SME accumulator array 'ZA' or tile vector, i.e.
4621 //
4622 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
4623 //
4624 // It's someone else's responsibility to make sure these tokens are sane
4625 // in the given context!
4626
4627 if (parseOptionalToken(AsmToken::RBrac))
4628 Operands.push_back(
4629 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4630 if (parseOptionalToken(AsmToken::Exclaim))
4631 Operands.push_back(
4632 AArch64Operand::CreateToken("!", getLoc(), getContext()));
4633 if (parseOptionalToken(AsmToken::RCurly))
4634 Operands.push_back(
4635 AArch64Operand::CreateToken("}", getLoc(), getContext()));
4636
4637 ++N;
4638 } while (parseOptionalToken(AsmToken::Comma));
4639 }
4640
4641 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4642 return true;
4643
4644 return false;
4645}
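
The mnemonic handling above slices the name at '.' characters: the head becomes the mnemonic token, and each remaining piece (keeping its leading '.') becomes a suffix token. A reduced sketch of that slicing, with std::string standing in for StringRef:

    #include <cassert>
    #include <string>
    #include <vector>

    // Split a mnemonic such as "b.eq" into its head plus '.'-prefixed
    // suffix tokens, mirroring the Name.find('.') loop above.
    static std::vector<std::string> splitMnemonic(const std::string &Name) {
      std::vector<std::string> Parts;
      size_t Start = 0, Next = Name.find('.');
      Parts.push_back(Name.substr(Start, Next)); // head; whole string if no '.'
      while (Next != std::string::npos) {
        Start = Next;
        Next = Name.find('.', Start + 1);
        // substr clamps the count, so npos - Start is safe for the tail.
        Parts.push_back(Name.substr(Start, Next - Start));
      }
      return Parts;
    }

    int main() {
      auto P = splitMnemonic("b.eq");
      assert(P.size() == 2 && P[0] == "b" && P[1] == ".eq");
      auto Q = splitMnemonic("mnem.x.y");
      assert(Q.size() == 3 && Q[1] == ".x" && Q[2] == ".y");
      return 0;
    }
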
4646
4647static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4648   assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4649 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4650 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4651 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4652 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4653 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4654 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4655}
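
isMatchingOrAlias depends on each register bank (B, H, S, D, Q, Z) occupying a contiguous run in the register enum, so (Reg - Bank0) + Z0 maps any scalar FP or vector register onto the Z register it overlaps. A standalone sketch of that index arithmetic, with made-up contiguous base values (the real enum values differ; only the layout assumption matters):

    #include <cassert>

    // Hypothetical contiguous enum bases for each register bank.
    enum : unsigned { B0 = 10, H0 = 50, S0 = 90, D0 = 130, Q0 = 170, Z0 = 210 };

    // Map a register from any bank to the Z register sharing its index,
    // mirroring the (Reg - Bank0) + Z0 terms in isMatchingOrAlias.
    static unsigned zAliasFrom(unsigned Reg, unsigned Bank0) {
      return (Reg - Bank0) + Z0;
    }

    int main() {
      // q3 and z3 share index 3, so q3 aliases z3.
      assert(zAliasFrom(Q0 + 3, Q0) == Z0 + 3);
      // b17 aliases z17.
      assert(zAliasFrom(B0 + 17, B0) == Z0 + 17);
      return 0;
    }
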
4656
4657// FIXME: This entire function is a giant hack to provide us with decent
4658// operand range validation/diagnostics until TableGen/MC can be extended
4659// to support autogeneration of this kind of validation.
4660bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4661 SmallVectorImpl<SMLoc> &Loc) {
4662 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4663 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4664
4665 // A prefix only applies to the instruction following it. Here we extract
4666 // prefix information for the next instruction before validating the current
4667   // one so that in the case of failure we don't erroneously continue using the
4668 // current prefix.
4669 PrefixInfo Prefix = NextPrefix;
4670 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4671
4672 // Before validating the instruction in isolation we run through the rules
4673 // applicable when it follows a prefix instruction.
4674 // NOTE: brk & hlt can be prefixed but require no additional validation.
4675 if (Prefix.isActive() &&
4676 (Inst.getOpcode() != AArch64::BRK) &&
4677 (Inst.getOpcode() != AArch64::HLT)) {
4678
4679     // Prefixed instructions must have a destructive operand.
4680 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4681 AArch64::NotDestructive)
4682 return Error(IDLoc, "instruction is unpredictable when following a"
4683 " movprfx, suggest replacing movprfx with mov");
4684
4685 // Destination operands must match.
4686 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4687 return Error(Loc[0], "instruction is unpredictable when following a"
4688 " movprfx writing to a different destination");
4689
4690 // Destination operand must not be used in any other location.
4691 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4692 if (Inst.getOperand(i).isReg() &&
4693 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4694 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4695 return Error(Loc[0], "instruction is unpredictable when following a"
4696 " movprfx and destination also used as non-destructive"
4697 " source");
4698 }
4699
4700 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4701 if (Prefix.isPredicated()) {
4702 int PgIdx = -1;
4703
4704       // Find the instruction's general predicate.
4705 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4706 if (Inst.getOperand(i).isReg() &&
4707 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4708 PgIdx = i;
4709 break;
4710 }
4711
4712 // Instruction must be predicated if the movprfx is predicated.
4713 if (PgIdx == -1 ||
4714 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4715 return Error(IDLoc, "instruction is unpredictable when following a"
4716 " predicated movprfx, suggest using unpredicated movprfx");
4717
4718 // Instruction must use same general predicate as the movprfx.
4719 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4720 return Error(IDLoc, "instruction is unpredictable when following a"
4721 " predicated movprfx using a different general predicate");
4722
4723 // Instruction element type must match the movprfx.
4724 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4725 return Error(IDLoc, "instruction is unpredictable when following a"
4726 " predicated movprfx with a different element size");
4727 }
4728 }
4729
4730 // Check for indexed addressing modes w/ the base register being the
4731 // same as a destination/source register or pair load where
4732 // the Rt == Rt2. All of those are undefined behaviour.
4733 switch (Inst.getOpcode()) {
4734 case AArch64::LDPSWpre:
4735 case AArch64::LDPWpost:
4736 case AArch64::LDPWpre:
4737 case AArch64::LDPXpost:
4738 case AArch64::LDPXpre: {
4739 unsigned Rt = Inst.getOperand(1).getReg();
4740 unsigned Rt2 = Inst.getOperand(2).getReg();
4741 unsigned Rn = Inst.getOperand(3).getReg();
4742 if (RI->isSubRegisterEq(Rn, Rt))
4743 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4744 "is also a destination");
4745 if (RI->isSubRegisterEq(Rn, Rt2))
4746 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4747 "is also a destination");
4748 [[fallthrough]];
4749 }
4750 case AArch64::LDPDi:
4751 case AArch64::LDPQi:
4752 case AArch64::LDPSi:
4753 case AArch64::LDPSWi:
4754 case AArch64::LDPWi:
4755 case AArch64::LDPXi: {
4756 unsigned Rt = Inst.getOperand(0).getReg();
4757 unsigned Rt2 = Inst.getOperand(1).getReg();
4758 if (Rt == Rt2)
4759 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4760 break;
4761 }
4762 case AArch64::LDPDpost:
4763 case AArch64::LDPDpre:
4764 case AArch64::LDPQpost:
4765 case AArch64::LDPQpre:
4766 case AArch64::LDPSpost:
4767 case AArch64::LDPSpre:
4768 case AArch64::LDPSWpost: {
4769 unsigned Rt = Inst.getOperand(1).getReg();
4770 unsigned Rt2 = Inst.getOperand(2).getReg();
4771 if (Rt == Rt2)
4772 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4773 break;
4774 }
4775 case AArch64::STPDpost:
4776 case AArch64::STPDpre:
4777 case AArch64::STPQpost:
4778 case AArch64::STPQpre:
4779 case AArch64::STPSpost:
4780 case AArch64::STPSpre:
4781 case AArch64::STPWpost:
4782 case AArch64::STPWpre:
4783 case AArch64::STPXpost:
4784 case AArch64::STPXpre: {
4785 unsigned Rt = Inst.getOperand(1).getReg();
4786 unsigned Rt2 = Inst.getOperand(2).getReg();
4787 unsigned Rn = Inst.getOperand(3).getReg();
4788 if (RI->isSubRegisterEq(Rn, Rt))
4789 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4790 "is also a source");
4791 if (RI->isSubRegisterEq(Rn, Rt2))
4792 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4793 "is also a source");
4794 break;
4795 }
4796 case AArch64::LDRBBpre:
4797 case AArch64::LDRBpre:
4798 case AArch64::LDRHHpre:
4799 case AArch64::LDRHpre:
4800 case AArch64::LDRSBWpre:
4801 case AArch64::LDRSBXpre:
4802 case AArch64::LDRSHWpre:
4803 case AArch64::LDRSHXpre:
4804 case AArch64::LDRSWpre:
4805 case AArch64::LDRWpre:
4806 case AArch64::LDRXpre:
4807 case AArch64::LDRBBpost:
4808 case AArch64::LDRBpost:
4809 case AArch64::LDRHHpost:
4810 case AArch64::LDRHpost:
4811 case AArch64::LDRSBWpost:
4812 case AArch64::LDRSBXpost:
4813 case AArch64::LDRSHWpost:
4814 case AArch64::LDRSHXpost:
4815 case AArch64::LDRSWpost:
4816 case AArch64::LDRWpost:
4817 case AArch64::LDRXpost: {
4818 unsigned Rt = Inst.getOperand(1).getReg();
4819 unsigned Rn = Inst.getOperand(2).getReg();
4820 if (RI->isSubRegisterEq(Rn, Rt))
4821 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4822 "is also a source");
4823 break;
4824 }
4825 case AArch64::STRBBpost:
4826 case AArch64::STRBpost:
4827 case AArch64::STRHHpost:
4828 case AArch64::STRHpost:
4829 case AArch64::STRWpost:
4830 case AArch64::STRXpost:
4831 case AArch64::STRBBpre:
4832 case AArch64::STRBpre:
4833 case AArch64::STRHHpre:
4834 case AArch64::STRHpre:
4835 case AArch64::STRWpre:
4836 case AArch64::STRXpre: {
4837 unsigned Rt = Inst.getOperand(1).getReg();
4838 unsigned Rn = Inst.getOperand(2).getReg();
4839 if (RI->isSubRegisterEq(Rn, Rt))
4840 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4841 "is also a source");
4842 break;
4843 }
4844 case AArch64::STXRB:
4845 case AArch64::STXRH:
4846 case AArch64::STXRW:
4847 case AArch64::STXRX:
4848 case AArch64::STLXRB:
4849 case AArch64::STLXRH:
4850 case AArch64::STLXRW:
4851 case AArch64::STLXRX: {
4852 unsigned Rs = Inst.getOperand(0).getReg();
4853 unsigned Rt = Inst.getOperand(1).getReg();
4854 unsigned Rn = Inst.getOperand(2).getReg();
4855 if (RI->isSubRegisterEq(Rt, Rs) ||
4856 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4857 return Error(Loc[0],
4858 "unpredictable STXR instruction, status is also a source");
4859 break;
4860 }
4861 case AArch64::STXPW:
4862 case AArch64::STXPX:
4863 case AArch64::STLXPW:
4864 case AArch64::STLXPX: {
4865 unsigned Rs = Inst.getOperand(0).getReg();
4866 unsigned Rt1 = Inst.getOperand(1).getReg();
4867 unsigned Rt2 = Inst.getOperand(2).getReg();
4868 unsigned Rn = Inst.getOperand(3).getReg();
4869 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4870 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4871 return Error(Loc[0],
4872 "unpredictable STXP instruction, status is also a source");
4873 break;
4874 }
4875 case AArch64::LDRABwriteback:
4876 case AArch64::LDRAAwriteback: {
4877 unsigned Xt = Inst.getOperand(0).getReg();
4878 unsigned Xn = Inst.getOperand(1).getReg();
4879 if (Xt == Xn)
4880 return Error(Loc[0],
4881 "unpredictable LDRA instruction, writeback base"
4882 " is also a destination");
4883 break;
4884 }
4885 }
4886
4887 // Check v8.8-A memops instructions.
4888 switch (Inst.getOpcode()) {
4889 case AArch64::CPYFP:
4890 case AArch64::CPYFPWN:
4891 case AArch64::CPYFPRN:
4892 case AArch64::CPYFPN:
4893 case AArch64::CPYFPWT:
4894 case AArch64::CPYFPWTWN:
4895 case AArch64::CPYFPWTRN:
4896 case AArch64::CPYFPWTN:
4897 case AArch64::CPYFPRT:
4898 case AArch64::CPYFPRTWN:
4899 case AArch64::CPYFPRTRN:
4900 case AArch64::CPYFPRTN:
4901 case AArch64::CPYFPT:
4902 case AArch64::CPYFPTWN:
4903 case AArch64::CPYFPTRN:
4904 case AArch64::CPYFPTN:
4905 case AArch64::CPYFM:
4906 case AArch64::CPYFMWN:
4907 case AArch64::CPYFMRN:
4908 case AArch64::CPYFMN:
4909 case AArch64::CPYFMWT:
4910 case AArch64::CPYFMWTWN:
4911 case AArch64::CPYFMWTRN:
4912 case AArch64::CPYFMWTN:
4913 case AArch64::CPYFMRT:
4914 case AArch64::CPYFMRTWN:
4915 case AArch64::CPYFMRTRN:
4916 case AArch64::CPYFMRTN:
4917 case AArch64::CPYFMT:
4918 case AArch64::CPYFMTWN:
4919 case AArch64::CPYFMTRN:
4920 case AArch64::CPYFMTN:
4921 case AArch64::CPYFE:
4922 case AArch64::CPYFEWN:
4923 case AArch64::CPYFERN:
4924 case AArch64::CPYFEN:
4925 case AArch64::CPYFEWT:
4926 case AArch64::CPYFEWTWN:
4927 case AArch64::CPYFEWTRN:
4928 case AArch64::CPYFEWTN:
4929 case AArch64::CPYFERT:
4930 case AArch64::CPYFERTWN:
4931 case AArch64::CPYFERTRN:
4932 case AArch64::CPYFERTN:
4933 case AArch64::CPYFET:
4934 case AArch64::CPYFETWN:
4935 case AArch64::CPYFETRN:
4936 case AArch64::CPYFETN:
4937 case AArch64::CPYP:
4938 case AArch64::CPYPWN:
4939 case AArch64::CPYPRN:
4940 case AArch64::CPYPN:
4941 case AArch64::CPYPWT:
4942 case AArch64::CPYPWTWN:
4943 case AArch64::CPYPWTRN:
4944 case AArch64::CPYPWTN:
4945 case AArch64::CPYPRT:
4946 case AArch64::CPYPRTWN:
4947 case AArch64::CPYPRTRN:
4948 case AArch64::CPYPRTN:
4949 case AArch64::CPYPT:
4950 case AArch64::CPYPTWN:
4951 case AArch64::CPYPTRN:
4952 case AArch64::CPYPTN:
4953 case AArch64::CPYM:
4954 case AArch64::CPYMWN:
4955 case AArch64::CPYMRN:
4956 case AArch64::CPYMN:
4957 case AArch64::CPYMWT:
4958 case AArch64::CPYMWTWN:
4959 case AArch64::CPYMWTRN:
4960 case AArch64::CPYMWTN:
4961 case AArch64::CPYMRT:
4962 case AArch64::CPYMRTWN:
4963 case AArch64::CPYMRTRN:
4964 case AArch64::CPYMRTN:
4965 case AArch64::CPYMT:
4966 case AArch64::CPYMTWN:
4967 case AArch64::CPYMTRN:
4968 case AArch64::CPYMTN:
4969 case AArch64::CPYE:
4970 case AArch64::CPYEWN:
4971 case AArch64::CPYERN:
4972 case AArch64::CPYEN:
4973 case AArch64::CPYEWT:
4974 case AArch64::CPYEWTWN:
4975 case AArch64::CPYEWTRN:
4976 case AArch64::CPYEWTN:
4977 case AArch64::CPYERT:
4978 case AArch64::CPYERTWN:
4979 case AArch64::CPYERTRN:
4980 case AArch64::CPYERTN:
4981 case AArch64::CPYET:
4982 case AArch64::CPYETWN:
4983 case AArch64::CPYETRN:
4984 case AArch64::CPYETN: {
4985 unsigned Xd_wb = Inst.getOperand(0).getReg();
4986 unsigned Xs_wb = Inst.getOperand(1).getReg();
4987 unsigned Xn_wb = Inst.getOperand(2).getReg();
4988 unsigned Xd = Inst.getOperand(3).getReg();
4989 unsigned Xs = Inst.getOperand(4).getReg();
4990 unsigned Xn = Inst.getOperand(5).getReg();
4991 if (Xd_wb != Xd)
4992 return Error(Loc[0],
4993 "invalid CPY instruction, Xd_wb and Xd do not match");
4994 if (Xs_wb != Xs)
4995 return Error(Loc[0],
4996 "invalid CPY instruction, Xs_wb and Xs do not match");
4997 if (Xn_wb != Xn)
4998 return Error(Loc[0],
4999 "invalid CPY instruction, Xn_wb and Xn do not match");
5000 if (Xd == Xs)
5001 return Error(Loc[0], "invalid CPY instruction, destination and source"
5002 " registers are the same");
5003 if (Xd == Xn)
5004 return Error(Loc[0], "invalid CPY instruction, destination and size"
5005 " registers are the same");
5006 if (Xs == Xn)
5007 return Error(Loc[0], "invalid CPY instruction, source and size"
5008 " registers are the same");
5009 break;
5010 }
5011 case AArch64::SETP:
5012 case AArch64::SETPT:
5013 case AArch64::SETPN:
5014 case AArch64::SETPTN:
5015 case AArch64::SETM:
5016 case AArch64::SETMT:
5017 case AArch64::SETMN:
5018 case AArch64::SETMTN:
5019 case AArch64::SETE:
5020 case AArch64::SETET:
5021 case AArch64::SETEN:
5022 case AArch64::SETETN:
5023 case AArch64::SETGP:
5024 case AArch64::SETGPT:
5025 case AArch64::SETGPN:
5026 case AArch64::SETGPTN:
5027 case AArch64::SETGM:
5028 case AArch64::SETGMT:
5029 case AArch64::SETGMN:
5030 case AArch64::SETGMTN:
5031 case AArch64::MOPSSETGE:
5032 case AArch64::MOPSSETGET:
5033 case AArch64::MOPSSETGEN:
5034 case AArch64::MOPSSETGETN: {
5035 unsigned Xd_wb = Inst.getOperand(0).getReg();
5036 unsigned Xn_wb = Inst.getOperand(1).getReg();
5037 unsigned Xd = Inst.getOperand(2).getReg();
5038 unsigned Xn = Inst.getOperand(3).getReg();
5039 unsigned Xm = Inst.getOperand(4).getReg();
5040 if (Xd_wb != Xd)
5041 return Error(Loc[0],
5042 "invalid SET instruction, Xd_wb and Xd do not match");
5043 if (Xn_wb != Xn)
5044 return Error(Loc[0],
5045 "invalid SET instruction, Xn_wb and Xn do not match");
5046 if (Xd == Xn)
5047 return Error(Loc[0], "invalid SET instruction, destination and size"
5048 " registers are the same");
5049 if (Xd == Xm)
5050 return Error(Loc[0], "invalid SET instruction, destination and source"
5051 " registers are the same");
5052 if (Xn == Xm)
5053 return Error(Loc[0], "invalid SET instruction, source and size"
5054 " registers are the same");
5055 break;
5056 }
5057 }
5058
5059 // Now check immediate ranges. Separate from the above as there is overlap
5060 // in the instructions being checked and this keeps the nested conditionals
5061 // to a minimum.
5062 switch (Inst.getOpcode()) {
5063 case AArch64::ADDSWri:
5064 case AArch64::ADDSXri:
5065 case AArch64::ADDWri:
5066 case AArch64::ADDXri:
5067 case AArch64::SUBSWri:
5068 case AArch64::SUBSXri:
5069 case AArch64::SUBWri:
5070 case AArch64::SUBXri: {
5071 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5072 // some slight duplication here.
5073 if (Inst.getOperand(2).isExpr()) {
5074 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5075 AArch64MCExpr::VariantKind ELFRefKind;
5076 MCSymbolRefExpr::VariantKind DarwinRefKind;
5077 int64_t Addend;
5078 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5079
5080 // Only allow these with ADDXri.
5081 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5082 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5083 Inst.getOpcode() == AArch64::ADDXri)
5084 return false;
5085
5086 // Only allow these with ADDXri/ADDWri
5087 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5088 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5089 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5090 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5091 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5092 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5093 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5094 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5095 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5096 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5097 (Inst.getOpcode() == AArch64::ADDXri ||
5098 Inst.getOpcode() == AArch64::ADDWri))
5099 return false;
5100
5101         // Otherwise, don't allow symbol refs in the immediate field.
5102         // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5103         // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5104         // 'cmp w0, borked')
5105 return Error(Loc.back(), "invalid immediate expression");
5106 }
5107 // We don't validate more complex expressions here
5108 }
5109 return false;
5110 }
5111 default:
5112 return false;
5113 }
5114}
5115
5116static std::string AArch64MnemonicSpellCheck(StringRef S,
5117 const FeatureBitset &FBS,
5118 unsigned VariantID = 0);
5119
5120bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5121 uint64_t ErrorInfo,
5122 OperandVector &Operands) {
5123 switch (ErrCode) {
5124 case Match_InvalidTiedOperand: {
5125 RegConstraintEqualityTy EqTy =
5126 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
5127 .getRegEqualityTy();
5128 switch (EqTy) {
5129 case RegConstraintEqualityTy::EqualsSubReg:
5130 return Error(Loc, "operand must be 64-bit form of destination register");
5131 case RegConstraintEqualityTy::EqualsSuperReg:
5132 return Error(Loc, "operand must be 32-bit form of destination register");
5133 case RegConstraintEqualityTy::EqualsReg:
5134 return Error(Loc, "operand must match destination register");
5135 }
5136     llvm_unreachable("Unknown RegConstraintEqualityTy");
5137 }
5138 case Match_MissingFeature:
5139 return Error(Loc,
5140 "instruction requires a CPU feature not currently enabled");
5141 case Match_InvalidOperand:
5142 return Error(Loc, "invalid operand for instruction");
5143 case Match_InvalidSuffix:
5144 return Error(Loc, "invalid type suffix for instruction");
5145 case Match_InvalidCondCode:
5146 return Error(Loc, "expected AArch64 condition code");
5147 case Match_AddSubRegExtendSmall:
5148 return Error(Loc,
5149 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5150 case Match_AddSubRegExtendLarge:
5151 return Error(Loc,
5152 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5153 case Match_AddSubSecondSource:
5154 return Error(Loc,
5155 "expected compatible register, symbol or integer in range [0, 4095]");
5156 case Match_LogicalSecondSource:
5157 return Error(Loc, "expected compatible register or logical immediate");
5158 case Match_InvalidMovImm32Shift:
5159 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5160 case Match_InvalidMovImm64Shift:
5161 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5162 case Match_AddSubRegShift32:
5163 return Error(Loc,
5164 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5165 case Match_AddSubRegShift64:
5166 return Error(Loc,
5167 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5168 case Match_InvalidFPImm:
5169 return Error(Loc,
5170 "expected compatible register or floating-point constant");
5171 case Match_InvalidMemoryIndexedSImm6:
5172 return Error(Loc, "index must be an integer in range [-32, 31].");
5173 case Match_InvalidMemoryIndexedSImm5:
5174 return Error(Loc, "index must be an integer in range [-16, 15].");
5175 case Match_InvalidMemoryIndexed1SImm4:
5176 return Error(Loc, "index must be an integer in range [-8, 7].");
5177 case Match_InvalidMemoryIndexed2SImm4:
5178 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5179 case Match_InvalidMemoryIndexed3SImm4:
5180 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5181 case Match_InvalidMemoryIndexed4SImm4:
5182 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5183 case Match_InvalidMemoryIndexed16SImm4:
5184 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5185 case Match_InvalidMemoryIndexed32SImm4:
5186 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5187 case Match_InvalidMemoryIndexed1SImm6:
5188 return Error(Loc, "index must be an integer in range [-32, 31].");
5189 case Match_InvalidMemoryIndexedSImm8:
5190 return Error(Loc, "index must be an integer in range [-128, 127].");
5191 case Match_InvalidMemoryIndexedSImm9:
5192 return Error(Loc, "index must be an integer in range [-256, 255].");
5193 case Match_InvalidMemoryIndexed16SImm9:
5194 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5195 case Match_InvalidMemoryIndexed8SImm10:
5196 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5197 case Match_InvalidMemoryIndexed4SImm7:
5198 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5199 case Match_InvalidMemoryIndexed8SImm7:
5200 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5201 case Match_InvalidMemoryIndexed16SImm7:
5202 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5203 case Match_InvalidMemoryIndexed8UImm5:
5204 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5205 case Match_InvalidMemoryIndexed4UImm5:
5206 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5207 case Match_InvalidMemoryIndexed2UImm5:
5208 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5209 case Match_InvalidMemoryIndexed8UImm6:
5210 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5211 case Match_InvalidMemoryIndexed16UImm6:
5212 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5213 case Match_InvalidMemoryIndexed4UImm6:
5214 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5215 case Match_InvalidMemoryIndexed2UImm6:
5216 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5217 case Match_InvalidMemoryIndexed1UImm6:
5218 return Error(Loc, "index must be in range [0, 63].");
5219 case Match_InvalidMemoryWExtend8:
5220 return Error(Loc,
5221 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5222 case Match_InvalidMemoryWExtend16:
5223 return Error(Loc,
5224 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5225 case Match_InvalidMemoryWExtend32:
5226 return Error(Loc,
5227 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5228 case Match_InvalidMemoryWExtend64:
5229 return Error(Loc,
5230 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5231 case Match_InvalidMemoryWExtend128:
5232 return Error(Loc,
5233 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5234 case Match_InvalidMemoryXExtend8:
5235 return Error(Loc,
5236 "expected 'lsl' or 'sxtx' with optional shift of #0");
5237 case Match_InvalidMemoryXExtend16:
5238 return Error(Loc,
5239 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5240 case Match_InvalidMemoryXExtend32:
5241 return Error(Loc,
5242 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5243 case Match_InvalidMemoryXExtend64:
5244 return Error(Loc,
5245 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5246 case Match_InvalidMemoryXExtend128:
5247 return Error(Loc,
5248 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5249 case Match_InvalidMemoryIndexed1:
5250 return Error(Loc, "index must be an integer in range [0, 4095].");
5251 case Match_InvalidMemoryIndexed2:
5252 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5253 case Match_InvalidMemoryIndexed4:
5254 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5255 case Match_InvalidMemoryIndexed8:
5256 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5257 case Match_InvalidMemoryIndexed16:
5258 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5259 case Match_InvalidImm0_0:
5260 return Error(Loc, "immediate must be 0.");
5261 case Match_InvalidImm0_1:
5262 return Error(Loc, "immediate must be an integer in range [0, 1].");
5263 case Match_InvalidImm0_3:
5264 return Error(Loc, "immediate must be an integer in range [0, 3].");
5265 case Match_InvalidImm0_7:
5266 return Error(Loc, "immediate must be an integer in range [0, 7].");
5267 case Match_InvalidImm0_15:
5268 return Error(Loc, "immediate must be an integer in range [0, 15].");
5269 case Match_InvalidImm0_31:
5270 return Error(Loc, "immediate must be an integer in range [0, 31].");
5271 case Match_InvalidImm0_63:
5272 return Error(Loc, "immediate must be an integer in range [0, 63].");
5273 case Match_InvalidImm0_127:
5274 return Error(Loc, "immediate must be an integer in range [0, 127].");
5275 case Match_InvalidImm0_255:
5276 return Error(Loc, "immediate must be an integer in range [0, 255].");
5277 case Match_InvalidImm0_65535:
5278 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5279 case Match_InvalidImm1_8:
5280 return Error(Loc, "immediate must be an integer in range [1, 8].");
5281 case Match_InvalidImm1_16:
5282 return Error(Loc, "immediate must be an integer in range [1, 16].");
5283 case Match_InvalidImm1_32:
5284 return Error(Loc, "immediate must be an integer in range [1, 32].");
5285 case Match_InvalidImm1_64:
5286 return Error(Loc, "immediate must be an integer in range [1, 64].");
5287 case Match_InvalidSVEAddSubImm8:
5288 return Error(Loc, "immediate must be an integer in range [0, 255]"
5289 " with a shift amount of 0");
5290 case Match_InvalidSVEAddSubImm16:
5291 case Match_InvalidSVEAddSubImm32:
5292 case Match_InvalidSVEAddSubImm64:
5293 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5294 "multiple of 256 in range [256, 65280]");
5295 case Match_InvalidSVECpyImm8:
5296 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5297 " with a shift amount of 0");
5298 case Match_InvalidSVECpyImm16:
5299 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5300 "multiple of 256 in range [-32768, 65280]");
5301 case Match_InvalidSVECpyImm32:
5302 case Match_InvalidSVECpyImm64:
5303 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5304 "multiple of 256 in range [-32768, 32512]");
5305 case Match_InvalidIndexRange0_0:
5306 return Error(Loc, "expected lane specifier '[0]'");
5307 case Match_InvalidIndexRange1_1:
5308 return Error(Loc, "expected lane specifier '[1]'");
5309 case Match_InvalidIndexRange0_15:
5310 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5311 case Match_InvalidIndexRange0_7:
5312 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5313 case Match_InvalidIndexRange0_3:
5314 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5315 case Match_InvalidIndexRange0_1:
5316 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5317 case Match_InvalidSVEIndexRange0_63:
5318 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5319 case Match_InvalidSVEIndexRange0_31:
5320 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5321 case Match_InvalidSVEIndexRange0_15:
5322 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5323 case Match_InvalidSVEIndexRange0_7:
5324 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5325 case Match_InvalidSVEIndexRange0_3:
5326 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5327 case Match_InvalidLabel:
5328 return Error(Loc, "expected label or encodable integer pc offset");
5329 case Match_MRS:
5330 return Error(Loc, "expected readable system register");
5331 case Match_MSR:
5332 case Match_InvalidSVCR:
5333 return Error(Loc, "expected writable system register or pstate");
5334 case Match_InvalidComplexRotationEven:
5335 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5336 case Match_InvalidComplexRotationOdd:
5337 return Error(Loc, "complex rotation must be 90 or 270.");
5338 case Match_MnemonicFail: {
5339 std::string Suggestion = AArch64MnemonicSpellCheck(
5340 ((AArch64Operand &)*Operands[0]).getToken(),
5341 ComputeAvailableFeatures(STI->getFeatureBits()));
5342 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5343 }
5344 case Match_InvalidGPR64shifted8:
5345 return Error(Loc, "register must be x0..x30 or xzr, without shift");
5346 case Match_InvalidGPR64shifted16:
5347 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5348 case Match_InvalidGPR64shifted32:
5349 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5350 case Match_InvalidGPR64shifted64:
5351 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5352 case Match_InvalidGPR64shifted128:
5353 return Error(
5354 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5355 case Match_InvalidGPR64NoXZRshifted8:
5356 return Error(Loc, "register must be x0..x30 without shift");
5357 case Match_InvalidGPR64NoXZRshifted16:
5358 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5359 case Match_InvalidGPR64NoXZRshifted32:
5360 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5361 case Match_InvalidGPR64NoXZRshifted64:
5362 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5363 case Match_InvalidGPR64NoXZRshifted128:
5364 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
5365 case Match_InvalidZPR32UXTW8:
5366 case Match_InvalidZPR32SXTW8:
5367 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5368 case Match_InvalidZPR32UXTW16:
5369 case Match_InvalidZPR32SXTW16:
5370 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5371 case Match_InvalidZPR32UXTW32:
5372 case Match_InvalidZPR32SXTW32:
5373 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5374 case Match_InvalidZPR32UXTW64:
5375 case Match_InvalidZPR32SXTW64:
5376 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5377 case Match_InvalidZPR64UXTW8:
5378 case Match_InvalidZPR64SXTW8:
5379 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5380 case Match_InvalidZPR64UXTW16:
5381 case Match_InvalidZPR64SXTW16:
5382 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5383 case Match_InvalidZPR64UXTW32:
5384 case Match_InvalidZPR64SXTW32:
5385 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5386 case Match_InvalidZPR64UXTW64:
5387 case Match_InvalidZPR64SXTW64:
5388 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5389 case Match_InvalidZPR32LSL8:
5390 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5391 case Match_InvalidZPR32LSL16:
5392 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5393 case Match_InvalidZPR32LSL32:
5394 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5395 case Match_InvalidZPR32LSL64:
5396 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5397 case Match_InvalidZPR64LSL8:
5398 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5399 case Match_InvalidZPR64LSL16:
5400 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5401 case Match_InvalidZPR64LSL32:
5402 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5403 case Match_InvalidZPR64LSL64:
5404 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5405 case Match_InvalidZPR0:
5406 return Error(Loc, "expected register without element width suffix");
5407 case Match_InvalidZPR8:
5408 case Match_InvalidZPR16:
5409 case Match_InvalidZPR32:
5410 case Match_InvalidZPR64:
5411 case Match_InvalidZPR128:
5412 return Error(Loc, "invalid element width");
5413 case Match_InvalidZPR_3b8:
5414 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
5415 case Match_InvalidZPR_3b16:
5416 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
5417 case Match_InvalidZPR_3b32:
5418 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
5419 case Match_InvalidZPR_4b16:
5420 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
5421 case Match_InvalidZPR_4b32:
5422 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
5423 case Match_InvalidZPR_4b64:
5424 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
5425 case Match_InvalidSVEPattern:
5426 return Error(Loc, "invalid predicate pattern");
5427 case Match_InvalidSVEPredicateAnyReg:
5428 case Match_InvalidSVEPredicateBReg:
5429 case Match_InvalidSVEPredicateHReg:
5430 case Match_InvalidSVEPredicateSReg:
5431 case Match_InvalidSVEPredicateDReg:
5432 return Error(Loc, "invalid predicate register.");
5433 case Match_InvalidSVEPredicate3bAnyReg:
5434 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
5435 case Match_InvalidSVEExactFPImmOperandHalfOne:
5436 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
5437 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5438 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
5439 case Match_InvalidSVEExactFPImmOperandZeroOne:
5440 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
5441 case Match_InvalidMatrixTileVectorH8:
5442 case Match_InvalidMatrixTileVectorV8:
5443 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
5444 case Match_InvalidMatrixTileVectorH16:
5445 case Match_InvalidMatrixTileVectorV16:
5446 return Error(Loc,
5447 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
5448 case Match_InvalidMatrixTileVectorH32:
5449 case Match_InvalidMatrixTileVectorV32:
5450 return Error(Loc,
5451 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
5452 case Match_InvalidMatrixTileVectorH64:
5453 case Match_InvalidMatrixTileVectorV64:
5454 return Error(Loc,
5455 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
5456 case Match_InvalidMatrixTileVectorH128:
5457 case Match_InvalidMatrixTileVectorV128:
5458 return Error(Loc,
5459 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
5460 case Match_InvalidMatrixTile32:
5461 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
5462 case Match_InvalidMatrixTile64:
5463 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
5464 case Match_InvalidMatrix:
5465 return Error(Loc, "invalid matrix operand, expected za");
5466 case Match_InvalidMatrixIndexGPR32_12_15:
5467 return Error(Loc, "operand must be a register in range [w12, w15]");
5468 default:
5469 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5469)
;
5470 }
5471}
5472
5473static const char *getSubtargetFeatureName(uint64_t Val);
5474
5475bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
5476 OperandVector &Operands,
5477 MCStreamer &Out,
5478 uint64_t &ErrorInfo,
5479 bool MatchingInlineAsm) {
5480 assert(!Operands.empty() && "Unexpected empty operand list!");
5481 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
5482 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
5483
5484 StringRef Tok = Op.getToken();
5485 unsigned NumOperands = Operands.size();
5486
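// "lsl Rd, Rn, #shift" is rewritten below into its UBFM form:
// immr = (regwidth - shift) & (regwidth - 1), imms = regwidth - 1 - shift.
// For example, "lsl w0, w1, #3" becomes "ubfm w0, w1, #29, #28".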
5487 if (NumOperands == 4 && Tok == "lsl") {
5488 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5489 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5490 if (Op2.isScalarReg() && Op3.isImm()) {
5491 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5492 if (Op3CE) {
5493 uint64_t Op3Val = Op3CE->getValue();
5494 uint64_t NewOp3Val = 0;
5495 uint64_t NewOp4Val = 0;
5496 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
5497 Op2.getReg())) {
5498 NewOp3Val = (32 - Op3Val) & 0x1f;
5499 NewOp4Val = 31 - Op3Val;
5500 } else {
5501 NewOp3Val = (64 - Op3Val) & 0x3f;
5502 NewOp4Val = 63 - Op3Val;
5503 }
5504
5505 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
5506 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
5507
5508 Operands[0] =
5509 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
5510 Operands.push_back(AArch64Operand::CreateImm(
5511 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
5512 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
5513 Op3.getEndLoc(), getContext());
5514 }
5515 }
5516 } else if (NumOperands == 4 && Tok == "bfc") {
5517 // FIXME: Horrible hack to handle BFC->BFM alias.
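// BFC clears Width bits starting at LSB; it maps onto BFM with the zero
// register as the source: immr = (RegWidth - LSB) & (RegWidth - 1) and
// imms = Width - 1. For example, "bfc w0, #4, #8" becomes
// "bfm w0, wzr, #28, #7".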
5518 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5519 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
5520 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
5521
5522 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
5523 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
5524 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
5525
5526 if (LSBCE && WidthCE) {
5527 uint64_t LSB = LSBCE->getValue();
5528 uint64_t Width = WidthCE->getValue();
5529
5530 uint64_t RegWidth = 0;
5531 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5532 Op1.getReg()))
5533 RegWidth = 64;
5534 else
5535 RegWidth = 32;
5536
5537 if (LSB >= RegWidth)
5538 return Error(LSBOp.getStartLoc(),
5539 "expected integer in range [0, 31]");
5540 if (Width < 1 || Width > RegWidth)
5541 return Error(WidthOp.getStartLoc(),
5542 "expected integer in range [1, 32]");
5543
5544 uint64_t ImmR = 0;
5545 if (RegWidth == 32)
5546 ImmR = (32 - LSB) & 0x1f;
5547 else
5548 ImmR = (64 - LSB) & 0x3f;
5549
5550 uint64_t ImmS = Width - 1;
5551
5552 if (ImmR != 0 && ImmS >= ImmR)
5553 return Error(WidthOp.getStartLoc(),
5554 "requested insert overflows register");
5555
5556 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
5557 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
5558 Operands[0] =
5559 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
5560 Operands[2] = AArch64Operand::CreateReg(
5561 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
5562 SMLoc(), SMLoc(), getContext());
5563 Operands[3] = AArch64Operand::CreateImm(
5564 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
5565 Operands.emplace_back(
5566 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
5567 WidthOp.getEndLoc(), getContext()));
5568 }
5569 }
5570 } else if (NumOperands == 5) {
5571 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
5572 // UBFIZ -> UBFM aliases.
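// All three insert-style aliases share one rewrite: the LSB operand becomes
// immr = (RegWidth - LSB) & (RegWidth - 1) and the width operand becomes
// imms = Width - 1. For example, "sbfiz x0, x1, #8, #16" becomes
// "sbfm x0, x1, #56, #15".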
5573 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
5574 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5575 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5576 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5577
5578 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5579 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5580 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5581
5582 if (Op3CE && Op4CE) {
5583 uint64_t Op3Val = Op3CE->getValue();
5584 uint64_t Op4Val = Op4CE->getValue();
5585
5586 uint64_t RegWidth = 0;
5587 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5588 Op1.getReg()))
5589 RegWidth = 64;
5590 else
5591 RegWidth = 32;
5592
5593 if (Op3Val >= RegWidth)
5594 return Error(Op3.getStartLoc(),
5595 "expected integer in range [0, 31]");
5596 if (Op4Val < 1 || Op4Val > RegWidth)
5597 return Error(Op4.getStartLoc(),
5598 "expected integer in range [1, 32]");
5599
5600 uint64_t NewOp3Val = 0;
5601 if (RegWidth == 32)
5602 NewOp3Val = (32 - Op3Val) & 0x1f;
5603 else
5604 NewOp3Val = (64 - Op3Val) & 0x3f;
5605
5606 uint64_t NewOp4Val = Op4Val - 1;
5607
5608 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
5609 return Error(Op4.getStartLoc(),
5610 "requested insert overflows register");
5611
5612 const MCExpr *NewOp3 =
5613 MCConstantExpr::create(NewOp3Val, getContext());
5614 const MCExpr *NewOp4 =
5615 MCConstantExpr::create(NewOp4Val, getContext());
5616 Operands[3] = AArch64Operand::CreateImm(
5617 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
5618 Operands[4] = AArch64Operand::CreateImm(
5619 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5620 if (Tok == "bfi")
5621 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5622 getContext());
5623 else if (Tok == "sbfiz")
5624 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5625 getContext());
5626 else if (Tok == "ubfiz")
5627 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5628 getContext());
5629 else
5630 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5630
)
;
5631 }
5632 }
5633
5634 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
5635 // UBFX -> UBFM aliases.
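// The extract-style aliases keep LSB as immr and rewrite the width operand
// to imms = LSB + Width - 1. For example, "ubfx w0, w1, #4, #8" becomes
// "ubfm w0, w1, #4, #11".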
5636 } else if (NumOperands == 5 &&
5637 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
5638 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5639 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5640 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5641
5642 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5643 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5644 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5645
5646 if (Op3CE && Op4CE) {
5647 uint64_t Op3Val = Op3CE->getValue();
5648 uint64_t Op4Val = Op4CE->getValue();
5649
5650 uint64_t RegWidth = 0;
5651 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5652 Op1.getReg()))
5653 RegWidth = 64;
5654 else
5655 RegWidth = 32;
5656
5657 if (Op3Val >= RegWidth)
5658 return Error(Op3.getStartLoc(),
5659 "expected integer in range [0, 31]");
5660 if (Op4Val < 1 || Op4Val > RegWidth)
5661 return Error(Op4.getStartLoc(),
5662 "expected integer in range [1, 32]");
5663
5664 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
5665
5666 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
5667 return Error(Op4.getStartLoc(),
5668 "requested extract overflows register");
5669
5670 const MCExpr *NewOp4 =
5671 MCConstantExpr::create(NewOp4Val, getContext());
5672 Operands[4] = AArch64Operand::CreateImm(
5673 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5674 if (Tok == "bfxil")
5675 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5676 getContext());
5677 else if (Tok == "sbfx")
5678 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5679 getContext());
5680 else if (Tok == "ubfx")
5681 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5682 getContext());
5683 else
5684 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5684
)
;
5685 }
5686 }
5687 }
5688 }
5689
5690 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
5691 // instruction for FP registers correctly in some rare circumstances. Convert
5692 // it to a safe instruction and warn (because silently changing someone's
5693 // assembly is rude).
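// Both forms zero all 128 bits of the vector register, so only the suffix
// token needs to change: "movi v0.2d, #0" becomes "movi v0.16b, #0".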
5694 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
5695 NumOperands == 4 && Tok == "movi") {
5696 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5697 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5698 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5699 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
5700 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
5701 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
5702 if (Suffix.lower() == ".2d" &&
5703 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
5704 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
5705 " correctly on this CPU, converting to equivalent movi.16b");
5706 // Switch the suffix to .16b.
5707 unsigned Idx = Op1.isToken() ? 1 : 2;
5708 Operands[Idx] =
5709 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
5710 }
5711 }
5712 }
5713
5714 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
5715 // InstAlias can't quite handle this since the reg classes aren't
5716 // subclasses.
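// For example, "sxtw x0, w1" arrives with a W source register; the operand
// is rewritten to the corresponding X register ("sxtw x0, x1") so the GPR64
// form can match.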
5717 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
5718 // The source register can be Wn here, but the matcher expects a
5719 // GPR64. Twiddle it here if necessary.
5720 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5721 if (Op.isScalarReg()) {
5722 unsigned Reg = getXRegFromWReg(Op.getReg());
5723 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5724 Op.getStartLoc(), Op.getEndLoc(),
5725 getContext());
5726 }
5727 }
5728 // FIXME: Likewise for sxt[bh] with a Xd dst operand
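// Here the promotion only applies when the destination is a 64-bit register:
// "sxtb x0, w1" becomes "sxtb x0, x1", while "sxtb w0, w1" is left alone.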
5729 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5730 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5731 if (Op.isScalarReg() &&
5732 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5733 Op.getReg())) {
5734 // The source register can be Wn here, but the matcher expects a
5735 // GPR64. Twiddle it here if necessary.
5736 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5737 if (Op.isScalarReg()) {
5738 unsigned Reg = getXRegFromWReg(Op.getReg());
5739 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5740 Op.getStartLoc(),
5741 Op.getEndLoc(), getContext());
5742 }
5743 }
5744 }
5745 // FIXME: Likewise for uxt[bh] with a Xd dst operand
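// Mirrors the sxt[bh] case above: for a 64-bit destination, a W source is
// promoted to the corresponding X register before matching.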
5746 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5747 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5748 if (Op.isScalarReg() &&