Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 4456, column 17
The left operand of '<' is a garbage value
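
For context, this class of warning fires when an uninitialized ("garbage") value reaches the left operand of a '<' comparison. A minimal sketch of the pattern, using illustrative names that are not taken from the source below:

    int pickIndex(const int *Vals, int N) {
      int Idx; // never written if the loop finds no negative element
      for (int I = 0; I < N; ++I)
        if (Vals[I] < 0)
          Idx = I;
      if (Idx < 10) // analyzer: left operand of '<' is a garbage value
        return Idx;
      return -1;
    }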

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Target/AArch64 -I lib/Target/AArch64 -I include -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include -I lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Target/AArch64/AsmParser/.. 
-D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-19-134126-35450-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCExpr.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCLinkerOptimizationHint.h"
32#include "llvm/MC/MCObjectFileInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37#include "llvm/MC/MCParser/MCTargetAsmParser.h"
38#include "llvm/MC/MCRegisterInfo.h"
39#include "llvm/MC/MCStreamer.h"
40#include "llvm/MC/MCSubtargetInfo.h"
41#include "llvm/MC/MCSymbol.h"
42#include "llvm/MC/MCTargetOptions.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/MC/SubtargetFeature.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/Casting.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/TargetParser.h"
52#include "llvm/Support/raw_ostream.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <string>
58#include <tuple>
59#include <utility>
60#include <vector>
61
62using namespace llvm;
63
64namespace {
65
66enum class RegKind {
67 Scalar,
68 NeonVector,
69 SVEDataVector,
70 SVEPredicateVector,
71 Matrix
72};
73
74enum class MatrixKind { Array, Tile, Row, Col };
75
76enum RegConstraintEqualityTy {
77 EqualsReg,
78 EqualsSuperReg,
79 EqualsSubReg
80};
81
82class AArch64AsmParser : public MCTargetAsmParser {
83private:
84 StringRef Mnemonic; ///< Instruction mnemonic.
85
86 // Map of register aliases registered via the .req directive.
87 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
88
89 class PrefixInfo {
90 public:
91 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
92 PrefixInfo Prefix;
93 switch (Inst.getOpcode()) {
94 case AArch64::MOVPRFX_ZZ:
95 Prefix.Active = true;
96 Prefix.Dst = Inst.getOperand(0).getReg();
97 break;
98 case AArch64::MOVPRFX_ZPmZ_B:
99 case AArch64::MOVPRFX_ZPmZ_H:
100 case AArch64::MOVPRFX_ZPmZ_S:
101 case AArch64::MOVPRFX_ZPmZ_D:
102 Prefix.Active = true;
103 Prefix.Predicated = true;
104 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
105 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
106 "No destructive element size set for movprfx");
107 Prefix.Dst = Inst.getOperand(0).getReg();
108 Prefix.Pg = Inst.getOperand(2).getReg();
109 break;
110 case AArch64::MOVPRFX_ZPzZ_B:
111 case AArch64::MOVPRFX_ZPzZ_H:
112 case AArch64::MOVPRFX_ZPzZ_S:
113 case AArch64::MOVPRFX_ZPzZ_D:
114 Prefix.Active = true;
115 Prefix.Predicated = true;
116 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
117 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
118 "No destructive element size set for movprfx");
119 Prefix.Dst = Inst.getOperand(0).getReg();
120 Prefix.Pg = Inst.getOperand(1).getReg();
121 break;
122 default:
123 break;
124 }
125
126 return Prefix;
127 }
128
129 PrefixInfo() : Active(false), Predicated(false) {}
130 bool isActive() const { return Active; }
131 bool isPredicated() const { return Predicated; }
132 unsigned getElementSize() const {
133 assert(Predicated);
134 return ElementSize;
135 }
136 unsigned getDstReg() const { return Dst; }
137 unsigned getPgReg() const {
138 assert(Predicated);
139 return Pg;
140 }
141
142 private:
143 bool Active;
144 bool Predicated;
145 unsigned ElementSize;
146 unsigned Dst;
147 unsigned Pg;
148 } NextPrefix;
149
150 AArch64TargetStreamer &getTargetStreamer() {
151 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
152 return static_cast<AArch64TargetStreamer &>(TS);
153 }
154
155 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
156
157 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
158 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
159 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
160 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
161 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
162 bool parseRegister(OperandVector &Operands);
163 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
164 bool parseNeonVectorList(OperandVector &Operands);
165 bool parseOptionalMulOperand(OperandVector &Operands);
166 bool parseKeywordOperand(OperandVector &Operands);
167 bool parseOperand(OperandVector &Operands, bool isCondCode,
168 bool invertCondCode);
169 bool parseImmExpr(int64_t &Out);
170 bool parseComma();
171 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
172 unsigned Last);
173
174 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
175 OperandVector &Operands);
176
177 bool parseDirectiveArch(SMLoc L);
178 bool parseDirectiveArchExtension(SMLoc L);
179 bool parseDirectiveCPU(SMLoc L);
180 bool parseDirectiveInst(SMLoc L);
181
182 bool parseDirectiveTLSDescCall(SMLoc L);
183
184 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
185 bool parseDirectiveLtorg(SMLoc L);
186
187 bool parseDirectiveReq(StringRef Name, SMLoc L);
188 bool parseDirectiveUnreq(SMLoc L);
189 bool parseDirectiveCFINegateRAState();
190 bool parseDirectiveCFIBKeyFrame();
191
192 bool parseDirectiveVariantPCS(SMLoc L);
193
194 bool parseDirectiveSEHAllocStack(SMLoc L);
195 bool parseDirectiveSEHPrologEnd(SMLoc L);
196 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
197 bool parseDirectiveSEHSaveFPLR(SMLoc L);
198 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
199 bool parseDirectiveSEHSaveReg(SMLoc L);
200 bool parseDirectiveSEHSaveRegX(SMLoc L);
201 bool parseDirectiveSEHSaveRegP(SMLoc L);
202 bool parseDirectiveSEHSaveRegPX(SMLoc L);
203 bool parseDirectiveSEHSaveLRPair(SMLoc L);
204 bool parseDirectiveSEHSaveFReg(SMLoc L);
205 bool parseDirectiveSEHSaveFRegX(SMLoc L);
206 bool parseDirectiveSEHSaveFRegP(SMLoc L);
207 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
208 bool parseDirectiveSEHSetFP(SMLoc L);
209 bool parseDirectiveSEHAddFP(SMLoc L);
210 bool parseDirectiveSEHNop(SMLoc L);
211 bool parseDirectiveSEHSaveNext(SMLoc L);
212 bool parseDirectiveSEHEpilogStart(SMLoc L);
213 bool parseDirectiveSEHEpilogEnd(SMLoc L);
214 bool parseDirectiveSEHTrapFrame(SMLoc L);
215 bool parseDirectiveSEHMachineFrame(SMLoc L);
216 bool parseDirectiveSEHContext(SMLoc L);
217 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
218
219 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
220 SmallVectorImpl<SMLoc> &Loc);
221 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
222 OperandVector &Operands, MCStreamer &Out,
223 uint64_t &ErrorInfo,
224 bool MatchingInlineAsm) override;
225/// @name Auto-generated Match Functions
226/// {
227
228#define GET_ASSEMBLER_HEADER
229#include "AArch64GenAsmMatcher.inc"
230
231 /// }
232
233 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
234 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
235 RegKind MatchKind);
236 OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
237 OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
238 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
239 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
240 OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
241 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
242 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
243 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
244 template <bool IsSVEPrefetch = false>
245 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
246 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
247 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
248 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
249 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
250 template<bool AddFPZeroAsLiteral>
251 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
252 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
253 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
254 bool tryParseNeonVectorRegister(OperandVector &Operands);
255 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
256 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
257 template <bool ParseShiftExtend,
258 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
259 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
260 template <bool ParseShiftExtend, bool ParseSuffix>
261 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
262 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
263 template <RegKind VectorKind>
264 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
265 bool ExpectMatch = false);
266 OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
267 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
268 OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
269
270public:
271 enum AArch64MatchResultTy {
272 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
273#define GET_OPERAND_DIAGNOSTIC_TYPES
274#include "AArch64GenAsmMatcher.inc"
275 };
276 bool IsILP32;
277
278 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
279 const MCInstrInfo &MII, const MCTargetOptions &Options)
280 : MCTargetAsmParser(Options, STI, MII) {
281 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
282 MCAsmParserExtension::Initialize(Parser);
283 MCStreamer &S = getParser().getStreamer();
284 if (S.getTargetStreamer() == nullptr)
285 new AArch64TargetStreamer(S);
286
287 // Alias .hword/.word/.[dx]word to the target-independent
288 // .2byte/.4byte/.8byte directives as they have the same form and
289 // semantics:
290 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
291 Parser.addAliasForDirective(".hword", ".2byte");
292 Parser.addAliasForDirective(".word", ".4byte");
293 Parser.addAliasForDirective(".dword", ".8byte");
294 Parser.addAliasForDirective(".xword", ".8byte");
295
296 // Initialize the set of available features.
297 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
298 }
299
300 bool regsEqual(const MCParsedAsmOperand &Op1,
301 const MCParsedAsmOperand &Op2) const override;
302 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
303 SMLoc NameLoc, OperandVector &Operands) override;
304 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
305 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
306 SMLoc &EndLoc) override;
307 bool ParseDirective(AsmToken DirectiveID) override;
308 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
309 unsigned Kind) override;
310
311 static bool classifySymbolRef(const MCExpr *Expr,
312 AArch64MCExpr::VariantKind &ELFRefKind,
313 MCSymbolRefExpr::VariantKind &DarwinRefKind,
314 int64_t &Addend);
315};
316
317/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
318/// instruction.
319class AArch64Operand : public MCParsedAsmOperand {
320private:
321 enum KindTy {
322 k_Immediate,
323 k_ShiftedImm,
324 k_CondCode,
325 k_Register,
326 k_MatrixRegister,
327 k_MatrixTileList,
328 k_SVCR,
329 k_VectorList,
330 k_VectorIndex,
331 k_Token,
332 k_SysReg,
333 k_SysCR,
334 k_Prefetch,
335 k_ShiftExtend,
336 k_FPImm,
337 k_Barrier,
338 k_PSBHint,
339 k_BTIHint,
340 } Kind;
341
342 SMLoc StartLoc, EndLoc;
343
344 struct TokOp {
345 const char *Data;
346 unsigned Length;
347 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
348 };
349
350 // Separate shift/extend operand.
351 struct ShiftExtendOp {
352 AArch64_AM::ShiftExtendType Type;
353 unsigned Amount;
354 bool HasExplicitAmount;
355 };
356
357 struct RegOp {
358 unsigned RegNum;
359 RegKind Kind;
360 int ElementWidth;
361
362 // The register may be allowed as a different register class,
363 // e.g. for GPR64as32 or GPR32as64.
364 RegConstraintEqualityTy EqualityTy;
365
366 // In some cases the shift/extend needs to be explicitly parsed together
367 // with the register, rather than as a separate operand. This is needed
368 // for addressing modes where the instruction as a whole dictates the
369 // scaling/extend, rather than specific bits in the instruction.
370 // By parsing them as a single operand, we avoid the need to pass an
371 // extra operand in all CodeGen patterns (because all operands need to
372 // have an associated value), and we avoid the need to update TableGen to
373 // accept operands that have no associated bits in the instruction.
374 //
375 // An added benefit of parsing them together is that the assembler
376 // can give a sensible diagnostic if the scaling is not correct.
377 //
378 // The default is 'lsl #0' (HasExplicitAmount = false) if no
379 // ShiftExtend is specified.
380 ShiftExtendOp ShiftExtend;
381 };
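// Illustrative sketch (an added example, not part of the file): in an
// operand like "[x1, w2, uxtw #2]", the "w2, uxtw #2" part is parsed as a
// single RegOp whose embedded ShiftExtend would hold roughly
// { Type = AArch64_AM::UXTW, Amount = 2, HasExplicitAmount = true },
// while a bare "w2" keeps the 'lsl #0' default described above.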
382
383 struct MatrixRegOp {
384 unsigned RegNum;
385 unsigned ElementWidth;
386 MatrixKind Kind;
387 };
388
389 struct MatrixTileListOp {
390 unsigned RegMask = 0;
391 };
392
393 struct VectorListOp {
394 unsigned RegNum;
395 unsigned Count;
396 unsigned NumElements;
397 unsigned ElementWidth;
398 RegKind RegisterKind;
399 };
400
401 struct VectorIndexOp {
402 int Val;
403 };
404
405 struct ImmOp {
406 const MCExpr *Val;
407 };
408
409 struct ShiftedImmOp {
410 const MCExpr *Val;
411 unsigned ShiftAmount;
412 };
413
414 struct CondCodeOp {
415 AArch64CC::CondCode Code;
416 };
417
418 struct FPImmOp {
419 uint64_t Val; // APFloat value bitcasted to uint64_t.
420 bool IsExact; // describes whether parsed value was exact.
421 };
422
423 struct BarrierOp {
424 const char *Data;
425 unsigned Length;
426 unsigned Val; // Not the enum since not all values have names.
427 bool HasnXSModifier;
428 };
429
430 struct SysRegOp {
431 const char *Data;
432 unsigned Length;
433 uint32_t MRSReg;
434 uint32_t MSRReg;
435 uint32_t PStateField;
436 };
437
438 struct SysCRImmOp {
439 unsigned Val;
440 };
441
442 struct PrefetchOp {
443 const char *Data;
444 unsigned Length;
445 unsigned Val;
446 };
447
448 struct PSBHintOp {
449 const char *Data;
450 unsigned Length;
451 unsigned Val;
452 };
453
454 struct BTIHintOp {
455 const char *Data;
456 unsigned Length;
457 unsigned Val;
458 };
459
460 struct SVCROp {
461 const char *Data;
462 unsigned Length;
463 unsigned PStateField;
464 };
465
466 union {
467 struct TokOp Tok;
468 struct RegOp Reg;
469 struct MatrixRegOp MatrixReg;
470 struct MatrixTileListOp MatrixTileList;
471 struct VectorListOp VectorList;
472 struct VectorIndexOp VectorIndex;
473 struct ImmOp Imm;
474 struct ShiftedImmOp ShiftedImm;
475 struct CondCodeOp CondCode;
476 struct FPImmOp FPImm;
477 struct BarrierOp Barrier;
478 struct SysRegOp SysReg;
479 struct SysCRImmOp SysCRImm;
480 struct PrefetchOp Prefetch;
481 struct PSBHintOp PSBHint;
482 struct BTIHintOp BTIHint;
483 struct ShiftExtendOp ShiftExtend;
484 struct SVCROp SVCR;
485 };
486
487 // Keep the MCContext around as the MCExprs may need to be manipulated
488 // during the add<>Operands() calls.
489 MCContext &Ctx;
490
491public:
492 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
493
494 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
495 Kind = o.Kind;
496 StartLoc = o.StartLoc;
497 EndLoc = o.EndLoc;
498 switch (Kind) {
499 case k_Token:
500 Tok = o.Tok;
501 break;
502 case k_Immediate:
503 Imm = o.Imm;
504 break;
505 case k_ShiftedImm:
506 ShiftedImm = o.ShiftedImm;
507 break;
508 case k_CondCode:
509 CondCode = o.CondCode;
510 break;
511 case k_FPImm:
512 FPImm = o.FPImm;
513 break;
514 case k_Barrier:
515 Barrier = o.Barrier;
516 break;
517 case k_Register:
518 Reg = o.Reg;
519 break;
520 case k_MatrixRegister:
521 MatrixReg = o.MatrixReg;
522 break;
523 case k_MatrixTileList:
524 MatrixTileList = o.MatrixTileList;
525 break;
526 case k_VectorList:
527 VectorList = o.VectorList;
528 break;
529 case k_VectorIndex:
530 VectorIndex = o.VectorIndex;
531 break;
532 case k_SysReg:
533 SysReg = o.SysReg;
534 break;
535 case k_SysCR:
536 SysCRImm = o.SysCRImm;
537 break;
538 case k_Prefetch:
539 Prefetch = o.Prefetch;
540 break;
541 case k_PSBHint:
542 PSBHint = o.PSBHint;
543 break;
544 case k_BTIHint:
545 BTIHint = o.BTIHint;
546 break;
547 case k_ShiftExtend:
548 ShiftExtend = o.ShiftExtend;
549 break;
550 case k_SVCR:
551 SVCR = o.SVCR;
552 break;
553 }
554 }
555
556 /// getStartLoc - Get the location of the first token of this operand.
557 SMLoc getStartLoc() const override { return StartLoc; }
558 /// getEndLoc - Get the location of the last token of this operand.
559 SMLoc getEndLoc() const override { return EndLoc; }
560
561 StringRef getToken() const {
562 assert(Kind == k_Token && "Invalid access!");
563 return StringRef(Tok.Data, Tok.Length);
564 }
565
566 bool isTokenSuffix() const {
567 assert(Kind == k_Token && "Invalid access!");
568 return Tok.IsSuffix;
569 }
570
571 const MCExpr *getImm() const {
572 assert(Kind == k_Immediate && "Invalid access!");
573 return Imm.Val;
574 }
575
576 const MCExpr *getShiftedImmVal() const {
577 assert(Kind == k_ShiftedImm && "Invalid access!");
578 return ShiftedImm.Val;
579 }
580
581 unsigned getShiftedImmShift() const {
582 assert(Kind == k_ShiftedImm && "Invalid access!");
583 return ShiftedImm.ShiftAmount;
584 }
585
586 AArch64CC::CondCode getCondCode() const {
587 assert(Kind == k_CondCode && "Invalid access!");
588 return CondCode.Code;
589 }
590
591 APFloat getFPImm() const {
592 assert (Kind == k_FPImm && "Invalid access!");
593 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
594 }
595
596 bool getFPImmIsExact() const {
597 assert (Kind == k_FPImm && "Invalid access!");
598 return FPImm.IsExact;
599 }
600
601 unsigned getBarrier() const {
602 assert(Kind == k_Barrier && "Invalid access!");
603 return Barrier.Val;
604 }
605
606 StringRef getBarrierName() const {
607 assert(Kind == k_Barrier && "Invalid access!");
608 return StringRef(Barrier.Data, Barrier.Length);
609 }
610
611 bool getBarriernXSModifier() const {
612 assert(Kind == k_Barrier && "Invalid access!");
613 return Barrier.HasnXSModifier;
614 }
615
616 unsigned getReg() const override {
617 assert(Kind == k_Register && "Invalid access!");
618 return Reg.RegNum;
619 }
620
621 unsigned getMatrixReg() const {
622 assert(Kind == k_MatrixRegister && "Invalid access!");
623 return MatrixReg.RegNum;
624 }
625
626 unsigned getMatrixElementWidth() const {
627 assert(Kind == k_MatrixRegister && "Invalid access!");
628 return MatrixReg.ElementWidth;
629 }
630
631 MatrixKind getMatrixKind() const {
632 assert(Kind == k_MatrixRegister && "Invalid access!");
633 return MatrixReg.Kind;
634 }
635
636 unsigned getMatrixTileListRegMask() const {
637 assert(isMatrixTileList() && "Invalid access!");
638 return MatrixTileList.RegMask;
639 }
640
641 RegConstraintEqualityTy getRegEqualityTy() const {
642 assert(Kind == k_Register && "Invalid access!");
643 return Reg.EqualityTy;
644 }
645
646 unsigned getVectorListStart() const {
647 assert(Kind == k_VectorList && "Invalid access!");
648 return VectorList.RegNum;
649 }
650
651 unsigned getVectorListCount() const {
652 assert(Kind == k_VectorList && "Invalid access!");
653 return VectorList.Count;
654 }
655
656 int getVectorIndex() const {
657 assert(Kind == k_VectorIndex && "Invalid access!");
658 return VectorIndex.Val;
659 }
660
661 StringRef getSysReg() const {
662 assert(Kind == k_SysReg && "Invalid access!");
663 return StringRef(SysReg.Data, SysReg.Length);
664 }
665
666 unsigned getSysCR() const {
667 assert(Kind == k_SysCR && "Invalid access!");
668 return SysCRImm.Val;
669 }
670
671 unsigned getPrefetch() const {
672 assert(Kind == k_Prefetch && "Invalid access!");
673 return Prefetch.Val;
674 }
675
676 unsigned getPSBHint() const {
677 assert(Kind == k_PSBHint && "Invalid access!");
678 return PSBHint.Val;
679 }
680
681 StringRef getPSBHintName() const {
682 assert(Kind == k_PSBHint && "Invalid access!");
683 return StringRef(PSBHint.Data, PSBHint.Length);
684 }
685
686 unsigned getBTIHint() const {
687 assert(Kind == k_BTIHint && "Invalid access!");
688 return BTIHint.Val;
689 }
690
691 StringRef getBTIHintName() const {
692 assert(Kind == k_BTIHint && "Invalid access!");
693 return StringRef(BTIHint.Data, BTIHint.Length);
694 }
695
696 StringRef getSVCR() const {
697 assert(Kind == k_SVCR && "Invalid access!");
698 return StringRef(SVCR.Data, SVCR.Length);
699 }
700
701 StringRef getPrefetchName() const {
702 assert(Kind == k_Prefetch && "Invalid access!");
703 return StringRef(Prefetch.Data, Prefetch.Length);
704 }
705
706 AArch64_AM::ShiftExtendType getShiftExtendType() const {
707 if (Kind == k_ShiftExtend)
708 return ShiftExtend.Type;
709 if (Kind == k_Register)
710 return Reg.ShiftExtend.Type;
711 llvm_unreachable("Invalid access!");
712 }
713
714 unsigned getShiftExtendAmount() const {
715 if (Kind == k_ShiftExtend)
716 return ShiftExtend.Amount;
717 if (Kind == k_Register)
718 return Reg.ShiftExtend.Amount;
719 llvm_unreachable("Invalid access!");
720 }
721
722 bool hasShiftExtendAmount() const {
723 if (Kind == k_ShiftExtend)
724 return ShiftExtend.HasExplicitAmount;
725 if (Kind == k_Register)
726 return Reg.ShiftExtend.HasExplicitAmount;
727 llvm_unreachable("Invalid access!");
728 }
729
730 bool isImm() const override { return Kind == k_Immediate; }
731 bool isMem() const override { return false; }
732
733 bool isUImm6() const {
734 if (!isImm())
735 return false;
736 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 if (!MCE)
738 return false;
739 int64_t Val = MCE->getValue();
740 return (Val >= 0 && Val < 64);
741 }
742
743 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
744
745 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
746 return isImmScaled<Bits, Scale>(true);
747 }
748
749 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
750 return isImmScaled<Bits, Scale>(false);
751 }
752
753 template <int Bits, int Scale>
754 DiagnosticPredicate isImmScaled(bool Signed) const {
755 if (!isImm())
756 return DiagnosticPredicateTy::NoMatch;
757
758 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
759 if (!MCE)
760 return DiagnosticPredicateTy::NoMatch;
761
762 int64_t MinVal, MaxVal;
763 if (Signed) {
764 int64_t Shift = Bits - 1;
765 MinVal = (int64_t(1) << Shift) * -Scale;
766 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
767 } else {
768 MinVal = 0;
769 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
770 }
771
772 int64_t Val = MCE->getValue();
773 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
774 return DiagnosticPredicateTy::Match;
775
776 return DiagnosticPredicateTy::NearMatch;
777 }
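// Worked example (illustrative, added for clarity): with Bits = 4,
// Scale = 16 and Signed = true, Shift = 3, so MinVal = (1 << 3) * -16 = -128
// and MaxVal = ((1 << 3) - 1) * 16 = 112; the predicate matches exactly the
// multiples of 16 in [-128, 112].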
778
779 DiagnosticPredicate isSVEPattern() const {
780 if (!isImm())
781 return DiagnosticPredicateTy::NoMatch;
782 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
783 if (!MCE)
784 return DiagnosticPredicateTy::NoMatch;
785 int64_t Val = MCE->getValue();
786 if (Val >= 0 && Val < 32)
787 return DiagnosticPredicateTy::Match;
788 return DiagnosticPredicateTy::NearMatch;
789 }
790
791 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
792 AArch64MCExpr::VariantKind ELFRefKind;
793 MCSymbolRefExpr::VariantKind DarwinRefKind;
794 int64_t Addend;
795 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
796 Addend)) {
797 // If we don't understand the expression, assume the best and
798 // let the fixup and relocation code deal with it.
799 return true;
800 }
801
802 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
803 ELFRefKind == AArch64MCExpr::VK_LO12 ||
804 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
805 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
806 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
807 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
808 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
809 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
810 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
811 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
812 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
813 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
814 // Note that we don't range-check the addend. It's adjusted modulo page
815 // size when converted, so there is no "out of range" condition when using
816 // @pageoff.
817 return true;
818 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
819 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
820 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
821 return Addend == 0;
822 }
823
824 return false;
825 }
826
827 template <int Scale> bool isUImm12Offset() const {
828 if (!isImm())
829 return false;
830
831 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
832 if (!MCE)
833 return isSymbolicUImm12Offset(getImm());
834
835 int64_t Val = MCE->getValue();
836 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
837 }
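// Worked example (illustrative, added for clarity): with Scale = 8, a
// constant offset matches when it is a non-negative multiple of 8 whose
// scaled value fits in 12 bits, i.e. one of 0, 8, 16, ..., 32760.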
838
839 template <int N, int M>
840 bool isImmInRange() const {
841 if (!isImm())
842 return false;
843 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
844 if (!MCE)
845 return false;
846 int64_t Val = MCE->getValue();
847 return (Val >= N && Val <= M);
848 }
849
850 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
851 // a logical immediate can always be represented when inverted.
852 template <typename T>
853 bool isLogicalImm() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
857 if (!MCE)
858 return false;
859
860 int64_t Val = MCE->getValue();
861 // Avoid left shift by 64 directly.
862 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
863 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
864 if ((Val & Upper) && (Val & Upper) != Upper)
865 return false;
866
867 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
868 }
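// Worked example (illustrative, added for clarity): the two half-width
// shifts above avoid the undefined "<< 64". For T = int64_t,
// UINT64_C(-1) << 32 << 32 == 0, so Upper = 0 and all 64 bits are
// significant; for T = int32_t, UINT64_C(-1) << 16 << 16 ==
// 0xFFFFFFFF00000000, so the top 32 bits must be all-0 or all-1 (the
// latter permitting the bitwise-NOT forms mentioned in the comment).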
869
870 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
871
872 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
873 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
874 /// immediate that can be shifted by 'Shift'.
875 template <unsigned Width>
876 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
877 if (isShiftedImm() && Width == getShiftedImmShift())
878 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
879 return std::make_pair(CE->getValue(), Width);
880
881 if (isImm())
882 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
883 int64_t Val = CE->getValue();
884 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
885 return std::make_pair(Val >> Width, Width);
886 else
887 return std::make_pair(Val, 0u);
888 }
889
890 return {};
891 }
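// Worked example (illustrative, added for clarity): for getShiftedVal<12>(),
// a plain immediate 0x1000 yields (1, 12) because
// (0x1000 >> 12) << 12 == 0x1000, while 5 yields (5, 0), and an explicit
// "#1, lsl #12" shifted-immediate operand yields (1, 12) directly.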
892
893 bool isAddSubImm() const {
894 if (!isShiftedImm() && !isImm())
895 return false;
896
897 const MCExpr *Expr;
898
899 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
900 if (isShiftedImm()) {
901 unsigned Shift = ShiftedImm.ShiftAmount;
902 Expr = ShiftedImm.Val;
903 if (Shift != 0 && Shift != 12)
904 return false;
905 } else {
906 Expr = getImm();
907 }
908
909 AArch64MCExpr::VariantKind ELFRefKind;
910 MCSymbolRefExpr::VariantKind DarwinRefKind;
911 int64_t Addend;
912 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
913 DarwinRefKind, Addend)) {
914 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
915 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
916 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
917 || ELFRefKind == AArch64MCExpr::VK_LO12
918 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
919 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
920 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
921 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
922 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
923 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
924 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
925 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
926 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
927 }
928
929 // If it's a constant, it should be a real immediate in range.
930 if (auto ShiftedVal = getShiftedVal<12>())
931 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
932
933 // If it's an expression, we hope for the best and let the fixup/relocation
934 // code deal with it.
935 return true;
936 }
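// Illustrative example (added, not from the file): "add x0, x1, #4096" is
// accepted because getShiftedVal<12>() folds 4096 to (1, 12) and 1 lies in
// [0, 0xfff]; 4097 is rejected since it folds to (4097, 0) and 4097 > 0xfff.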
937
938 bool isAddSubImmNeg() const {
939 if (!isShiftedImm() && !isImm())
940 return false;
941
942 // Otherwise it should be a real negative immediate in range.
943 if (auto ShiftedVal = getShiftedVal<12>())
944 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
945
946 return false;
947 }
948
949 // Signed value in the range -128 to +127. For element widths of
950 // 16 bits or higher it may also be a signed multiple of 256 in the
951 // range -32768 to +32512.
952 // For element-width of 8 bits a range of -128 to 255 is accepted,
953 // since a copy of a byte can be either signed/unsigned.
954 template <typename T>
955 DiagnosticPredicate isSVECpyImm() const {
956 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
957 return DiagnosticPredicateTy::NoMatch;
958
959 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
960 std::is_same<int8_t, T>::value;
961 if (auto ShiftedImm = getShiftedVal<8>())
962 if (!(IsByte && ShiftedImm->second) &&
963 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
964 << ShiftedImm->second))
965 return DiagnosticPredicateTy::Match;
966
967 return DiagnosticPredicateTy::NearMatch;
968 }
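// Worked example (illustrative, added for clarity): for T = int16_t,
// 32512 (= 127 << 8) matches because getShiftedVal<8>() folds it to
// (127, 8), whereas 300 folds to (300, 0) and is not a valid 16-bit cpy
// immediate, so it only near-matches.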
969
970 // Unsigned value in the range 0 to 255. For element widths of
971 // 16 bits or higher it may also be a signed multiple of 256 in the
972 // range 0 to 65280.
973 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
974 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
975 return DiagnosticPredicateTy::NoMatch;
976
977 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
978 std::is_same<int8_t, T>::value;
979 if (auto ShiftedImm = getShiftedVal<8>())
980 if (!(IsByte && ShiftedImm->second) &&
981 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
982 << ShiftedImm->second))
983 return DiagnosticPredicateTy::Match;
984
985 return DiagnosticPredicateTy::NearMatch;
986 }
987
988 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
989 if (isLogicalImm<T>() && !isSVECpyImm<T>())
990 return DiagnosticPredicateTy::Match;
991 return DiagnosticPredicateTy::NoMatch;
992 }
993
994 bool isCondCode() const { return Kind == k_CondCode; }
995
996 bool isSIMDImmType10() const {
997 if (!isImm())
998 return false;
999 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1000 if (!MCE)
1001 return false;
1002 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1003 }
1004
1005 template<int N>
1006 bool isBranchTarget() const {
1007 if (!isImm())
1008 return false;
1009 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1010 if (!MCE)
1011 return true;
1012 int64_t Val = MCE->getValue();
1013 if (Val & 0x3)
1014 return false;
1015 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1016 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1017 }
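// Worked example (illustrative, added for clarity): with N = 14 (a 14-bit
// branch target, as in tbz/tbnz), the accepted constants are the multiples
// of 4 in [-((1 << 13) << 2), ((1 << 13) - 1) << 2] = [-32768, 32764].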
1018
1019 bool
1020 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1021 if (!isImm())
1022 return false;
1023
1024 AArch64MCExpr::VariantKind ELFRefKind;
1025 MCSymbolRefExpr::VariantKind DarwinRefKind;
1026 int64_t Addend;
1027 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1028 DarwinRefKind, Addend)) {
1029 return false;
1030 }
1031 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1032 return false;
1033
1034 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1035 }
1036
1037 bool isMovWSymbolG3() const {
1038 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1039 }
1040
1041 bool isMovWSymbolG2() const {
1042 return isMovWSymbol(
1043 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1044 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1045 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1046 AArch64MCExpr::VK_DTPREL_G2});
1047 }
1048
1049 bool isMovWSymbolG1() const {
1050 return isMovWSymbol(
1051 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1052 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1053 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1054 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1055 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1056 }
1057
1058 bool isMovWSymbolG0() const {
1059 return isMovWSymbol(
1060 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1061 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1062 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1063 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1064 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1065 }
1066
1067 template<int RegWidth, int Shift>
1068 bool isMOVZMovAlias() const {
1069 if (!isImm()) return false;
1070
1071 const MCExpr *E = getImm();
1072 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1073 uint64_t Value = CE->getValue();
1074
1075 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1076 }
1077 // Only supports the case of Shift being 0 if an expression is used as an
1078 // operand
1079 return !Shift && E;
1080 }
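// Illustrative example (added, not from the file): "mov x0, #0x20000"
// matches the <RegWidth = 64, Shift = 16> instantiation, since
// 0x20000 == 2 << 16, corresponding to "movz x0, #2, lsl #16".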
1081
1082 template<int RegWidth, int Shift>
1083 bool isMOVNMovAlias() const {
1084 if (!isImm()) return false;
1085
1086 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1087 if (!CE) return false;
1088 uint64_t Value = CE->getValue();
1089
1090 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1091 }
1092
1093 bool isFPImm() const {
1094 return Kind == k_FPImm &&
1095 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1096 }
1097
1098 bool isBarrier() const {
1099 return Kind == k_Barrier && !getBarriernXSModifier();
1100 }
1101 bool isBarriernXS() const {
1102 return Kind == k_Barrier && getBarriernXSModifier();
1103 }
1104 bool isSysReg() const { return Kind == k_SysReg; }
1105
1106 bool isMRSSystemRegister() const {
1107 if (!isSysReg()) return false;
1108
1109 return SysReg.MRSReg != -1U;
1110 }
1111
1112 bool isMSRSystemRegister() const {
1113 if (!isSysReg()) return false;
1114 return SysReg.MSRReg != -1U;
1115 }
1116
1117 bool isSystemPStateFieldWithImm0_1() const {
1118 if (!isSysReg()) return false;
1119 return (SysReg.PStateField == AArch64PState::PAN ||
1120 SysReg.PStateField == AArch64PState::DIT ||
1121 SysReg.PStateField == AArch64PState::UAO ||
1122 SysReg.PStateField == AArch64PState::SSBS);
1123 }
1124
1125 bool isSystemPStateFieldWithImm0_15() const {
1126 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1127 return SysReg.PStateField != -1U;
1128 }
1129
1130 bool isSVCR() const {
1131 if (Kind != k_SVCR)
1132 return false;
1133 return SVCR.PStateField != -1U;
1134 }
1135
1136 bool isReg() const override {
1137 return Kind == k_Register;
1138 }
1139
1140 bool isScalarReg() const {
1141 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1142 }
1143
1144 bool isNeonVectorReg() const {
1145 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1146 }
1147
1148 bool isNeonVectorRegLo() const {
1149 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1150 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1151 Reg.RegNum) ||
1152 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1153 Reg.RegNum));
1154 }
1155
1156 bool isMatrix() const { return Kind == k_MatrixRegister; }
1157 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1158
1159 template <unsigned Class> bool isSVEVectorReg() const {
1160 RegKind RK;
1161 switch (Class) {
1162 case AArch64::ZPRRegClassID:
1163 case AArch64::ZPR_3bRegClassID:
1164 case AArch64::ZPR_4bRegClassID:
1165 RK = RegKind::SVEDataVector;
1166 break;
1167 case AArch64::PPRRegClassID:
1168 case AArch64::PPR_3bRegClassID:
1169 RK = RegKind::SVEPredicateVector;
1170 break;
1171 default:
1172 llvm_unreachable("Unsupported register class");
1173 }
1174
1175 return (Kind == k_Register && Reg.Kind == RK) &&
1176 AArch64MCRegisterClasses[Class].contains(getReg());
1177 }
1178
1179 template <unsigned Class> bool isFPRasZPR() const {
1180 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1181 AArch64MCRegisterClasses[Class].contains(getReg());
1182 }
1183
1184 template <int ElementWidth, unsigned Class>
1185 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1186 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1187 return DiagnosticPredicateTy::NoMatch;
1188
1189 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1190 return DiagnosticPredicateTy::Match;
1191
1192 return DiagnosticPredicateTy::NearMatch;
1193 }
1194
1195 template <int ElementWidth, unsigned Class>
1196 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1197 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1198 return DiagnosticPredicateTy::NoMatch;
1199
1200 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1201 return DiagnosticPredicateTy::Match;
1202
1203 return DiagnosticPredicateTy::NearMatch;
1204 }
1205
1206 template <int ElementWidth, unsigned Class,
1207 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1208 bool ShiftWidthAlwaysSame>
1209 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1210 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1211 if (!VectorMatch.isMatch())
1212 return DiagnosticPredicateTy::NoMatch;
1213
1214 // Give a more specific diagnostic when the user has explicitly typed in
1215 // a shift-amount that does not match what is expected, but for which
1216 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1217 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1218 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1219 ShiftExtendTy == AArch64_AM::SXTW) &&
1220 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1221 return DiagnosticPredicateTy::NoMatch;
1222
1223 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1224 return DiagnosticPredicateTy::Match;
1225
1226 return DiagnosticPredicateTy::NearMatch;
1227 }
1228
1229 bool isGPR32as64() const {
1230 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1231 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1232 }
1233
1234 bool isGPR64as32() const {
1235 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1236 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1237 }
1238
1239 bool isGPR64x8() const {
1240 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1241 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1242 Reg.RegNum);
1243 }
1244
1245 bool isWSeqPair() const {
1246 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1247 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1248 Reg.RegNum);
1249 }
1250
1251 bool isXSeqPair() const {
1252 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1253 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1254 Reg.RegNum);
1255 }
1256
1257 template<int64_t Angle, int64_t Remainder>
1258 DiagnosticPredicate isComplexRotation() const {
1259 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1260
1261 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1262 if (!CE) return DiagnosticPredicateTy::NoMatch;
1263 uint64_t Value = CE->getValue();
1264
1265 if (Value % Angle == Remainder && Value <= 270)
1266 return DiagnosticPredicateTy::Match;
1267 return DiagnosticPredicateTy::NearMatch;
1268 }
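// Worked example (illustrative, added for clarity): isComplexRotation<90, 0>
// matches the rotations 0, 90, 180 and 270 (as used by fcmla), while
// isComplexRotation<180, 90> matches only 90 and 270 (as used by fcadd).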
1269
1270 template <unsigned RegClassID> bool isGPR64() const {
1271 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1272 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1273 }
1274
1275 template <unsigned RegClassID, int ExtWidth>
1276 DiagnosticPredicate isGPR64WithShiftExtend() const {
1277 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1278 return DiagnosticPredicateTy::NoMatch;
1279
1280 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1281 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1282 return DiagnosticPredicateTy::Match;
1283 return DiagnosticPredicateTy::NearMatch;
1284 }
1285
1286 /// Is this a vector list with the type implicit (presumably attached to the
1287 /// instruction itself)?
1288 template <RegKind VectorKind, unsigned NumRegs>
1289 bool isImplicitlyTypedVectorList() const {
1290 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1291 VectorList.NumElements == 0 &&
1292 VectorList.RegisterKind == VectorKind;
1293 }
1294
1295 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1296 unsigned ElementWidth>
1297 bool isTypedVectorList() const {
1298 if (Kind != k_VectorList)
1299 return false;
1300 if (VectorList.Count != NumRegs)
1301 return false;
1302 if (VectorList.RegisterKind != VectorKind)
1303 return false;
1304 if (VectorList.ElementWidth != ElementWidth)
1305 return false;
1306 return VectorList.NumElements == NumElements;
1307 }
1308
1309 template <int Min, int Max>
1310 DiagnosticPredicate isVectorIndex() const {
1311 if (Kind != k_VectorIndex)
1312 return DiagnosticPredicateTy::NoMatch;
1313 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1314 return DiagnosticPredicateTy::Match;
1315 return DiagnosticPredicateTy::NearMatch;
1316 }
1317
1318 bool isToken() const override { return Kind == k_Token; }
1319
1320 bool isTokenEqual(StringRef Str) const {
1321 return Kind == k_Token && getToken() == Str;
1322 }
1323 bool isSysCR() const { return Kind == k_SysCR; }
1324 bool isPrefetch() const { return Kind == k_Prefetch; }
1325 bool isPSBHint() const { return Kind == k_PSBHint; }
1326 bool isBTIHint() const { return Kind == k_BTIHint; }
1327 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1328 bool isShifter() const {
1329 if (!isShiftExtend())
1330 return false;
1331
1332 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1333 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1334 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1335 ST == AArch64_AM::MSL);
1336 }
1337
1338 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1339 if (Kind != k_FPImm)
1340 return DiagnosticPredicateTy::NoMatch;
1341
1342 if (getFPImmIsExact()) {
1343 // Look up the immediate in the table of supported immediates.
1344 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1345 assert(Desc && "Unknown enum value");
1346
1347 // Calculate its FP value.
1348 APFloat RealVal(APFloat::IEEEdouble());
1349 auto StatusOrErr =
1350 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1351 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1352 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1352
)
;
1353
1354 if (getFPImm().bitwiseIsEqual(RealVal))
1355 return DiagnosticPredicateTy::Match;
1356 }
1357
1358 return DiagnosticPredicateTy::NearMatch;
1359 }
1360
1361 template <unsigned ImmA, unsigned ImmB>
1362 DiagnosticPredicate isExactFPImm() const {
1363 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1364 if ((Res = isExactFPImm<ImmA>()))
1365 return DiagnosticPredicateTy::Match;
1366 if ((Res = isExactFPImm<ImmB>()))
1367 return DiagnosticPredicateTy::Match;
1368 return Res;
1369 }
1370
1371 bool isExtend() const {
1372 if (!isShiftExtend())
1373 return false;
1374
1375 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1376 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1377 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1378 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1379 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1380 ET == AArch64_AM::LSL) &&
1381 getShiftExtendAmount() <= 4;
1382 }
1383
1384 bool isExtend64() const {
1385 if (!isExtend())
1386 return false;
1387 // Make sure the extend expects a 32-bit source register.
1388 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1389 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1390 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1391 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1392 }
1393
1394 bool isExtendLSL64() const {
1395 if (!isExtend())
1396 return false;
1397 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1398 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1399 ET == AArch64_AM::LSL) &&
1400 getShiftExtendAmount() <= 4;
1401 }
1402
1403 template<int Width> bool isMemXExtend() const {
1404 if (!isExtend())
1405 return false;
1406 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1407 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1408 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1409 getShiftExtendAmount() == 0);
1410 }
1411
1412 template<int Width> bool isMemWExtend() const {
1413 if (!isExtend())
1414 return false;
1415 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1416 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1417 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1418 getShiftExtendAmount() == 0);
1419 }
1420
1421 template <unsigned width>
1422 bool isArithmeticShifter() const {
1423 if (!isShifter())
1424 return false;
1425
1426 // An arithmetic shifter is LSL, LSR, or ASR.
1427 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1428 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1429 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1430 }
1431
1432 template <unsigned width>
1433 bool isLogicalShifter() const {
1434 if (!isShifter())
1435 return false;
1436
1437 // A logical shifter is LSL, LSR, ASR or ROR.
1438 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1439 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1440 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1441 getShiftExtendAmount() < width;
1442 }
1443
1444 bool isMovImm32Shifter() const {
1445 if (!isShifter())
1446 return false;
1447
1448 // A 32-bit MOVi shifter is LSL of 0 or 16.
1449 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1450 if (ST != AArch64_AM::LSL)
1451 return false;
1452 uint64_t Val = getShiftExtendAmount();
1453 return (Val == 0 || Val == 16);
1454 }
1455
1456 bool isMovImm64Shifter() const {
1457 if (!isShifter())
1458 return false;
1459
1460 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1461 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1462 if (ST != AArch64_AM::LSL)
1463 return false;
1464 uint64_t Val = getShiftExtendAmount();
1465 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1466 }
1467
1468 bool isLogicalVecShifter() const {
1469 if (!isShifter())
1470 return false;
1471
1472 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1473 unsigned Shift = getShiftExtendAmount();
1474 return getShiftExtendType() == AArch64_AM::LSL &&
1475 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1476 }
1477
1478 bool isLogicalVecHalfWordShifter() const {
1479 if (!isLogicalVecShifter())
1480 return false;
1481
1482 // A logical vector half-word shifter is a left shift by 0 or 8.
1483 unsigned Shift = getShiftExtendAmount();
1484 return getShiftExtendType() == AArch64_AM::LSL &&
1485 (Shift == 0 || Shift == 8);
1486 }
1487
1488 bool isMoveVecShifter() const {
1489 if (!isShiftExtend())
1490 return false;
1491
1492 // A move vector shifter is an MSL shift by 8 or 16.
1493 unsigned Shift = getShiftExtendAmount();
1494 return getShiftExtendType() == AArch64_AM::MSL &&
1495 (Shift == 8 || Shift == 16);
1496 }
1497
1498 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1499 // to LDUR/STUR when the offset is not legal for the former but is for
1500 // the latter. As such, in addition to checking for being a legal unscaled
1501 // address, also check that it is not a legal scaled address. This avoids
1502 // ambiguity in the matcher.
1503 template<int Width>
1504 bool isSImm9OffsetFB() const {
1505 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1506 }
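// E.g. for Width = 32, "ldr w0, [x1, #3]" matches this fallback class: #3
// fits in a signed 9-bit unscaled offset but is not a multiple of 4, so only
// the LDUR form can encode it. An offset of #4 is a legal scaled offset and
// therefore does not match, avoiding the ambiguity described above.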
1507
1508 bool isAdrpLabel() const {
1509 // Validation was handled during parsing, so we just verify that
1510 // something didn't go haywire.
1511 if (!isImm())
1512 return false;
1513
1514 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1515 int64_t Val = CE->getValue();
1516 int64_t Min = - (4096 * (1LL << (21 - 1)));
1517 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1518 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1519 }
1520
1521 return true;
1522 }
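// The bounds above are the ADRP range: a signed 21-bit count of 4 KiB pages,
// i.e. roughly +/-4 GiB from the current page.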
1523
1524 bool isAdrLabel() const {
1525 // Validation was handled during parsing, so we just verify that
1526 // something didn't go haywire.
1527 if (!isImm())
1528 return false;
1529
1530 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1531 int64_t Val = CE->getValue();
1532 int64_t Min = - (1LL << (21 - 1));
1533 int64_t Max = ((1LL << (21 - 1)) - 1);
1534 return Val >= Min && Val <= Max;
1535 }
1536
1537 return true;
1538 }
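// Likewise, ADR takes a signed 21-bit byte offset, i.e. +/-1 MiB.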
1539
1540 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1541 DiagnosticPredicate isMatrixRegOperand() const {
1542 if (!isMatrix())
1543 return DiagnosticPredicateTy::NoMatch;
1544 if (getMatrixKind() != Kind ||
1545 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1546 EltSize != getMatrixElementWidth())
1547 return DiagnosticPredicateTy::NearMatch;
1548 return DiagnosticPredicateTy::Match;
1549 }
1550
1551 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1552 // Add as immediates when possible. Null MCExpr = 0.
1553 if (!Expr)
1554 Inst.addOperand(MCOperand::createImm(0));
1555 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1556 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1557 else
1558 Inst.addOperand(MCOperand::createExpr(Expr));
1559 }
1560
1561 void addRegOperands(MCInst &Inst, unsigned N) const {
1562 assert(N == 1 && "Invalid number of operands!");
1563 Inst.addOperand(MCOperand::createReg(getReg()));
1564 }
1565
1566 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1567 assert(N == 1 && "Invalid number of operands!");
1568 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1569 }
1570
1571 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1572 assert(N == 1 && "Invalid number of operands!");
1573 assert(
1574 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1575
1576 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1577 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1578 RI->getEncodingValue(getReg()));
1579
1580 Inst.addOperand(MCOperand::createReg(Reg));
1581 }
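// E.g. for X3 the encoding value is 3, and the register at index 3 of the
// GPR32 class is W3; this relies on the GPR32 class being laid out in
// encoding order.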
1582
1583 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1584 assert(N == 1 && "Invalid number of operands!");
1585 assert(
1586 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1587
1588 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1589 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1590 RI->getEncodingValue(getReg()));
1591
1592 Inst.addOperand(MCOperand::createReg(Reg));
1593 }
1594
1595 template <int Width>
1596 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1597 unsigned Base;
1598 switch (Width) {
1599 case 8: Base = AArch64::B0; break;
1600 case 16: Base = AArch64::H0; break;
1601 case 32: Base = AArch64::S0; break;
1602 case 64: Base = AArch64::D0; break;
1603 case 128: Base = AArch64::Q0; break;
1604 default:
1605 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1605)
;
1606 }
1607 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1608 }
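// E.g. for Width = 64 the operand D5 becomes Z0 + (D5 - D0) = Z5, i.e. the
// scalar FP register is reinterpreted as the SVE vector register that
// overlays it.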
1609
1610 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1611 assert(N == 1 && "Invalid number of operands!");
1612 assert(
1613 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1614 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1615 }
1616
1617 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1618 assert(N == 1 && "Invalid number of operands!");
1619 assert(
1620 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1621 Inst.addOperand(MCOperand::createReg(getReg()));
1622 }
1623
1624 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1625 assert(N == 1 && "Invalid number of operands!");
1626 Inst.addOperand(MCOperand::createReg(getReg()));
1627 }
1628
1629 enum VecListIndexType {
1630 VecListIdx_DReg = 0,
1631 VecListIdx_QReg = 1,
1632 VecListIdx_ZReg = 2,
1633 };
1634
1635 template <VecListIndexType RegTy, unsigned NumRegs>
1636 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1637 assert(N == 1 && "Invalid number of operands!");
1638 static const unsigned FirstRegs[][5] = {
1639 /* DReg */ { AArch64::Q0,
1640 AArch64::D0, AArch64::D0_D1,
1641 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1642 /* QReg */ { AArch64::Q0,
1643 AArch64::Q0, AArch64::Q0_Q1,
1644 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1645 /* ZReg */ { AArch64::Z0,
1646 AArch64::Z0, AArch64::Z0_Z1,
1647 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1648 };
1649
1650 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1651 " NumRegs must be <= 4 for ZRegs");
1652
1653 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1654 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1655 FirstRegs[(unsigned)RegTy][0]));
1656 }
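// E.g. a two-register D list starting at Q2 (D lists track their start via
// the aliasing Q register, hence the Q0 sentinel in column 0) yields
// D0_D1 + (Q2 - Q0) = D2_D3, the tuple register for { d2, d3 }.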
1657
1658 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1659 assert(N == 1 && "Invalid number of operands!");
1660 unsigned RegMask = getMatrixTileListRegMask();
1661 assert(RegMask <= 0xFF && "Invalid mask!");
1662 Inst.addOperand(MCOperand::createImm(RegMask));
1663 }
1664
1665 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1666 assert(N == 1 && "Invalid number of operands!");
1667 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1668 }
1669
1670 template <unsigned ImmIs0, unsigned ImmIs1>
1671 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1672 assert(N == 1 && "Invalid number of operands!");
1673 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1674 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1675 }
1676
1677 void addImmOperands(MCInst &Inst, unsigned N) const {
1678 assert(N == 1 && "Invalid number of operands!");
1679 // If this is a pageoff symrefexpr with an addend, adjust the addend
1680 // to be only the page-offset portion. Otherwise, just add the expr
1681 // as-is.
1682 addExpr(Inst, getImm());
1683 }
1684
1685 template <int Shift>
1686 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1687 assert(N == 2 && "Invalid number of operands!");
1688 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1689 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1690 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1691 } else if (isShiftedImm()) {
1692 addExpr(Inst, getShiftedImmVal());
1693 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1694 } else {
1695 addExpr(Inst, getImm());
1696 Inst.addOperand(MCOperand::createImm(0));
1697 }
1698 }
1699
1700 template <int Shift>
1701 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1702 assert(N == 2 && "Invalid number of operands!");
1703 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1704 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1705 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1706 } else
1707 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1707
)
;
1708 }
1709
1710 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1711 assert(N == 1 && "Invalid number of operands!");
1712 Inst.addOperand(MCOperand::createImm(getCondCode()));
1713 }
1714
1715 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1716 assert(N == 1 && "Invalid number of operands!");
1717 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1718 if (!MCE)
1719 addExpr(Inst, getImm());
1720 else
1721 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1722 }
1723
1724 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1725 addImmOperands(Inst, N);
1726 }
1727
1728 template<int Scale>
1729 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1730 assert(N == 1 && "Invalid number of operands!");
1731 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1732
1733 if (!MCE) {
1734 Inst.addOperand(MCOperand::createExpr(getImm()));
1735 return;
1736 }
1737 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1738 }
1739
1740 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1741 assert(N == 1 && "Invalid number of operands!");
1742 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1743 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1744 }
1745
1746 template <int Scale>
1747 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1748 assert(N == 1 && "Invalid number of operands!");
1749 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1750 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1751 }
1752
1753 template <typename T>
1754 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1755 assert(N == 1 && "Invalid number of operands!");
1756 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1757 std::make_unsigned_t<T> Val = MCE->getValue();
1758 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1759 Inst.addOperand(MCOperand::createImm(encoding));
1760 }
1761
1762 template <typename T>
1763 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1764 assert(N == 1 && "Invalid number of operands!");
1765 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1766 std::make_unsigned_t<T> Val = ~MCE->getValue();
1767 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1768 Inst.addOperand(MCOperand::createImm(encoding));
1769 }
1770
1771 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1772 assert(N == 1 && "Invalid number of operands!");
1773 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1774 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1775 Inst.addOperand(MCOperand::createImm(encoding));
1776 }
1777
1778 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1779 // Branch operands don't encode the low bits, so shift them off
1780 // here. If it's a label, however, just put it on directly as there's
1781 // not enough information now to do anything.
1782 assert(N == 1 && "Invalid number of operands!");
1783 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1784 if (!MCE) {
1785 addExpr(Inst, getImm());
1786 return;
1787 }
1788 assert(MCE && "Invalid constant immediate operand!");
1789 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1790 }
1791
1792 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1793 // Branch operands don't encode the low bits, so shift them off
1794 // here. If it's a label, however, just put it on directly as there's
1795 // not enough information now to do anything.
1796 assert(N == 1 && "Invalid number of operands!");
1797 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1798 if (!MCE) {
1799 addExpr(Inst, getImm());
1800 return;
1801 }
1802 assert(MCE && "Invalid constant immediate operand!");
1803 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1804 }
1805
1806 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1807 // Branch operands don't encode the low bits, so shift them off
1808 // here. If it's a label, however, just put it on directly as there's
1809 // not enough information now to do anything.
1810 assert(N == 1 && "Invalid number of operands!");
1811 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1812 if (!MCE) {
1813 addExpr(Inst, getImm());
1814 return;
1815 }
1816 assert(MCE && "Invalid constant immediate operand!");
1817 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1818 }
1819
1820 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1821 assert(N == 1 && "Invalid number of operands!");
1822 Inst.addOperand(MCOperand::createImm(
1823 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1824 }
1825
1826 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1827 assert(N == 1 && "Invalid number of operands!");
1828 Inst.addOperand(MCOperand::createImm(getBarrier()));
1829 }
1830
1831 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1832 assert(N == 1 && "Invalid number of operands!");
1833 Inst.addOperand(MCOperand::createImm(getBarrier()));
1834 }
1835
1836 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1837 assert(N == 1 && "Invalid number of operands!");
1838
1839 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1840 }
1841
1842 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1843 assert(N == 1 && "Invalid number of operands!");
1844
1845 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1846 }
1847
1848 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1849 assert(N == 1 && "Invalid number of operands!");
1850
1851 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1852 }
1853
1854 void addSVCROperands(MCInst &Inst, unsigned N) const {
1855 assert(N == 1 && "Invalid number of operands!");
1856
1857 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1858 }
1859
1860 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1861 assert(N == 1 && "Invalid number of operands!");
1862
1863 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1864 }
1865
1866 void addSysCROperands(MCInst &Inst, unsigned N) const {
1867 assert(N == 1 && "Invalid number of operands!");
1868 Inst.addOperand(MCOperand::createImm(getSysCR()));
1869 }
1870
1871 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1872 assert(N == 1 && "Invalid number of operands!");
1873 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1874 }
1875
1876 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1877 assert(N == 1 && "Invalid number of operands!");
1878 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1879 }
1880
1881 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1882 assert(N == 1 && "Invalid number of operands!");
1883 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1884 }
1885
1886 void addShifterOperands(MCInst &Inst, unsigned N) const {
1887 assert(N == 1 && "Invalid number of operands!");
1888 unsigned Imm =
1889 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1890 Inst.addOperand(MCOperand::createImm(Imm));
1891 }
1892
1893 void addExtendOperands(MCInst &Inst, unsigned N) const {
1894 assert(N == 1 && "Invalid number of operands!");
1895 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1896 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1897 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1898 Inst.addOperand(MCOperand::createImm(Imm));
1899 }
1900
1901 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1902 assert(N == 1 && "Invalid number of operands!");
1903 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1904 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1905 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1906 Inst.addOperand(MCOperand::createImm(Imm));
1907 }
1908
1909 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1910 assert(N == 2 && "Invalid number of operands!");
1911 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1912 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1913 Inst.addOperand(MCOperand::createImm(IsSigned));
1914 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1915 }
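// E.g. for "[x0, w1, sxtw #2]" this emits IsSigned = 1 and DoShift = 1;
// for "[x0, w1, uxtw]" (implicit zero amount) it emits 0 and 0.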
1916
1917 // For 8-bit load/store instructions with a register offset, both the
1918 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1919 // they're disambiguated by whether the shift was explicit or implicit rather
1920 // than its size.
1921 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1922 assert(N == 2 && "Invalid number of operands!");
1923 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1924 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1925 Inst.addOperand(MCOperand::createImm(IsSigned));
1926 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1927 }
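// E.g. "ldrb w0, [x1, x2]" leaves the shift implicit and selects the
// NoShift variant, while "ldrb w0, [x1, x2, lsl #0]" spells it out and
// selects the DoShift variant, even though both shift by zero.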
1928
1929 template<int Shift>
1930 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1931 assert(N == 1 && "Invalid number of operands!");
1932
1933 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1934 if (CE) {
1935 uint64_t Value = CE->getValue();
1936 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1937 } else {
1938 addExpr(Inst, getImm());
1939 }
1940 }
1941
1942 template<int Shift>
1943 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1944 assert(N == 1 && "Invalid number of operands!");
1945
1946 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1947 uint64_t Value = CE->getValue();
1948 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1949 }
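// E.g. for "mov x0, #-2" with Shift = 0: ~(-2) = 1, so the alias is encoded
// as "movn x0, #1" (MOVN materialises the bitwise NOT of its immediate).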
1950
1951 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1952 assert(N == 1 && "Invalid number of operands!");
1953 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1954 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1955 }
1956
1957 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1958 assert(N == 1 && "Invalid number of operands!");
1959 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1960 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1961 }
1962
1963 void print(raw_ostream &OS) const override;
1964
1965 static std::unique_ptr<AArch64Operand>
1966 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1967 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1968 Op->Tok.Data = Str.data();
1969 Op->Tok.Length = Str.size();
1970 Op->Tok.IsSuffix = IsSuffix;
1971 Op->StartLoc = S;
1972 Op->EndLoc = S;
1973 return Op;
1974 }
1975
1976 static std::unique_ptr<AArch64Operand>
1977 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1978 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1979 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1980 unsigned ShiftAmount = 0,
1981 unsigned HasExplicitAmount = false) {
1982 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1983 Op->Reg.RegNum = RegNum;
1984 Op->Reg.Kind = Kind;
1985 Op->Reg.ElementWidth = 0;
1986 Op->Reg.EqualityTy = EqTy;
1987 Op->Reg.ShiftExtend.Type = ExtTy;
1988 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1989 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1990 Op->StartLoc = S;
1991 Op->EndLoc = E;
1992 return Op;
1993 }
1994
1995 static std::unique_ptr<AArch64Operand>
1996 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1997 SMLoc S, SMLoc E, MCContext &Ctx,
1998 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1999 unsigned ShiftAmount = 0,
2000 unsigned HasExplicitAmount = false) {
2001 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2002 Kind == RegKind::SVEPredicateVector) &&
2003 "Invalid vector kind");
2004 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2005 HasExplicitAmount);
2006 Op->Reg.ElementWidth = ElementWidth;
2007 return Op;
2008 }
2009
2010 static std::unique_ptr<AArch64Operand>
2011 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
2012 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
2013 MCContext &Ctx) {
2014 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2015 Op->VectorList.RegNum = RegNum;
2016 Op->VectorList.Count = Count;
2017 Op->VectorList.NumElements = NumElements;
2018 Op->VectorList.ElementWidth = ElementWidth;
2019 Op->VectorList.RegisterKind = RegisterKind;
2020 Op->StartLoc = S;
2021 Op->EndLoc = E;
2022 return Op;
2023 }
2024
2025 static std::unique_ptr<AArch64Operand>
2026 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2027 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2028 Op->VectorIndex.Val = Idx;
2029 Op->StartLoc = S;
2030 Op->EndLoc = E;
2031 return Op;
2032 }
2033
2034 static std::unique_ptr<AArch64Operand>
2035 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2036 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2037 Op->MatrixTileList.RegMask = RegMask;
2038 Op->StartLoc = S;
2039 Op->EndLoc = E;
2040 return Op;
2041 }
2042
2043 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2044 const unsigned ElementWidth) {
2045 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2046 RegMap = {
2047 {{0, AArch64::ZAB0},
2048 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2049 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2050 {{8, AArch64::ZAB0},
2051 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2052 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2053 {{16, AArch64::ZAH0},
2054 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2055 {{16, AArch64::ZAH1},
2056 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2057 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2058 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2059 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2060 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2061 };
2062
2063 if (ElementWidth == 64)
2064 OutRegs.insert(Reg);
2065 else {
2066 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2067 assert(!Regs.empty() && "Invalid tile or element width!");
2068 for (auto OutReg : Regs)
2069 OutRegs.insert(OutReg);
2070 }
2071 }
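// E.g. the 32-bit tile ZAS1 aliases the 64-bit tiles {ZAD1, ZAD5}, per the
// map above, while a 64-bit tile only aliases itself.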
2072
2073 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2074 SMLoc E, MCContext &Ctx) {
2075 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2076 Op->Imm.Val = Val;
2077 Op->StartLoc = S;
2078 Op->EndLoc = E;
2079 return Op;
2080 }
2081
2082 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2083 unsigned ShiftAmount,
2084 SMLoc S, SMLoc E,
2085 MCContext &Ctx) {
2086 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2087 Op->ShiftedImm.Val = Val;
2088 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2089 Op->StartLoc = S;
2090 Op->EndLoc = E;
2091 return Op;
2092 }
2093
2094 static std::unique_ptr<AArch64Operand>
2095 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2096 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2097 Op->CondCode.Code = Code;
2098 Op->StartLoc = S;
2099 Op->EndLoc = E;
2100 return Op;
2101 }
2102
2103 static std::unique_ptr<AArch64Operand>
2104 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2105 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2106 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2107 Op->FPImm.IsExact = IsExact;
2108 Op->StartLoc = S;
2109 Op->EndLoc = S;
2110 return Op;
2111 }
2112
2113 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2114 StringRef Str,
2115 SMLoc S,
2116 MCContext &Ctx,
2117 bool HasnXSModifier) {
2118 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2119 Op->Barrier.Val = Val;
2120 Op->Barrier.Data = Str.data();
2121 Op->Barrier.Length = Str.size();
2122 Op->Barrier.HasnXSModifier = HasnXSModifier;
2123 Op->StartLoc = S;
2124 Op->EndLoc = S;
2125 return Op;
2126 }
2127
2128 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2129 uint32_t MRSReg,
2130 uint32_t MSRReg,
2131 uint32_t PStateField,
2132 MCContext &Ctx) {
2133 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2134 Op->SysReg.Data = Str.data();
2135 Op->SysReg.Length = Str.size();
2136 Op->SysReg.MRSReg = MRSReg;
2137 Op->SysReg.MSRReg = MSRReg;
2138 Op->SysReg.PStateField = PStateField;
2139 Op->StartLoc = S;
2140 Op->EndLoc = S;
2141 return Op;
2142 }
2143
2144 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2145 SMLoc E, MCContext &Ctx) {
2146 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2147 Op->SysCRImm.Val = Val;
2148 Op->StartLoc = S;
2149 Op->EndLoc = E;
2150 return Op;
2151 }
2152
2153 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2154 StringRef Str,
2155 SMLoc S,
2156 MCContext &Ctx) {
2157 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2158 Op->Prefetch.Val = Val;
2159 Op->Prefetch.Data = Str.data();
2160 Op->Prefetch.Length = Str.size();
2161 Op->StartLoc = S;
2162 Op->EndLoc = S;
2163 return Op;
2164 }
2165
2166 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2167 StringRef Str,
2168 SMLoc S,
2169 MCContext &Ctx) {
2170 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2171 Op->PSBHint.Val = Val;
2172 Op->PSBHint.Data = Str.data();
2173 Op->PSBHint.Length = Str.size();
2174 Op->StartLoc = S;
2175 Op->EndLoc = S;
2176 return Op;
2177 }
2178
2179 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2180 StringRef Str,
2181 SMLoc S,
2182 MCContext &Ctx) {
2183 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2184 Op->BTIHint.Val = Val | 32;
2185 Op->BTIHint.Data = Str.data();
2186 Op->BTIHint.Length = Str.size();
2187 Op->StartLoc = S;
2188 Op->EndLoc = S;
2189 return Op;
2190 }
2191
2192 static std::unique_ptr<AArch64Operand>
2193 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2194 SMLoc S, SMLoc E, MCContext &Ctx) {
2195 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2196 Op->MatrixReg.RegNum = RegNum;
2197 Op->MatrixReg.ElementWidth = ElementWidth;
2198 Op->MatrixReg.Kind = Kind;
2199 Op->StartLoc = S;
2200 Op->EndLoc = E;
2201 return Op;
2202 }
2203
2204 static std::unique_ptr<AArch64Operand>
2205 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2206 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2207 Op->SVCR.PStateField = PStateField;
2208 Op->SVCR.Data = Str.data();
2209 Op->SVCR.Length = Str.size();
2210 Op->StartLoc = S;
2211 Op->EndLoc = S;
2212 return Op;
2213 }
2214
2215 static std::unique_ptr<AArch64Operand>
2216 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2217 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2218 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2219 Op->ShiftExtend.Type = ShOp;
2220 Op->ShiftExtend.Amount = Val;
2221 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2222 Op->StartLoc = S;
2223 Op->EndLoc = E;
2224 return Op;
2225 }
2226};
2227
2228} // end anonymous namespace.
2229
2230void AArch64Operand::print(raw_ostream &OS) const {
2231 switch (Kind) {
2232 case k_FPImm:
2233 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2234 if (!getFPImmIsExact())
2235 OS << " (inexact)";
2236 OS << ">";
2237 break;
2238 case k_Barrier: {
2239 StringRef Name = getBarrierName();
2240 if (!Name.empty())
2241 OS << "<barrier " << Name << ">";
2242 else
2243 OS << "<barrier invalid #" << getBarrier() << ">";
2244 break;
2245 }
2246 case k_Immediate:
2247 OS << *getImm();
2248 break;
2249 case k_ShiftedImm: {
2250 unsigned Shift = getShiftedImmShift();
2251 OS << "<shiftedimm ";
2252 OS << *getShiftedImmVal();
2253 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2254 break;
2255 }
2256 case k_CondCode:
2257 OS << "<condcode " << getCondCode() << ">";
2258 break;
2259 case k_VectorList: {
2260 OS << "<vectorlist ";
2261 unsigned Reg = getVectorListStart();
2262 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2263 OS << Reg + i << " ";
2264 OS << ">";
2265 break;
2266 }
2267 case k_VectorIndex:
2268 OS << "<vectorindex " << getVectorIndex() << ">";
2269 break;
2270 case k_SysReg:
2271 OS << "<sysreg: " << getSysReg() << '>';
2272 break;
2273 case k_Token:
2274 OS << "'" << getToken() << "'";
2275 break;
2276 case k_SysCR:
2277 OS << "c" << getSysCR();
2278 break;
2279 case k_Prefetch: {
2280 StringRef Name = getPrefetchName();
2281 if (!Name.empty())
2282 OS << "<prfop " << Name << ">";
2283 else
2284 OS << "<prfop invalid #" << getPrefetch() << ">";
2285 break;
2286 }
2287 case k_PSBHint:
2288 OS << getPSBHintName();
2289 break;
2290 case k_BTIHint:
2291 OS << getBTIHintName();
2292 break;
2293 case k_MatrixRegister:
2294 OS << "<matrix " << getMatrixReg() << ">";
2295 break;
2296 case k_MatrixTileList: {
2297 OS << "<matrixlist ";
2298 unsigned RegMask = getMatrixTileListRegMask();
2299 unsigned MaxBits = 8;
2300 for (unsigned I = MaxBits; I > 0; --I)
2301 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2302 OS << '>';
2303 break;
2304 }
2305 case k_SVCR: {
2306 OS << getSVCR();
2307 break;
2308 }
2309 case k_Register:
2310 OS << "<register " << getReg() << ">";
2311 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2312 break;
2313 LLVM_FALLTHROUGH;
2314 case k_ShiftExtend:
2315 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2316 << getShiftExtendAmount();
2317 if (!hasShiftExtendAmount())
2318 OS << "<imp>";
2319 OS << '>';
2320 break;
2321 }
2322}
2323
2324/// @name Auto-generated Match Functions
2325/// {
2326
2327static unsigned MatchRegisterName(StringRef Name);
2328
2329/// }
2330
2331static unsigned MatchNeonVectorRegName(StringRef Name) {
2332 return StringSwitch<unsigned>(Name.lower())
2333 .Case("v0", AArch64::Q0)
2334 .Case("v1", AArch64::Q1)
2335 .Case("v2", AArch64::Q2)
2336 .Case("v3", AArch64::Q3)
2337 .Case("v4", AArch64::Q4)
2338 .Case("v5", AArch64::Q5)
2339 .Case("v6", AArch64::Q6)
2340 .Case("v7", AArch64::Q7)
2341 .Case("v8", AArch64::Q8)
2342 .Case("v9", AArch64::Q9)
2343 .Case("v10", AArch64::Q10)
2344 .Case("v11", AArch64::Q11)
2345 .Case("v12", AArch64::Q12)
2346 .Case("v13", AArch64::Q13)
2347 .Case("v14", AArch64::Q14)
2348 .Case("v15", AArch64::Q15)
2349 .Case("v16", AArch64::Q16)
2350 .Case("v17", AArch64::Q17)
2351 .Case("v18", AArch64::Q18)
2352 .Case("v19", AArch64::Q19)
2353 .Case("v20", AArch64::Q20)
2354 .Case("v21", AArch64::Q21)
2355 .Case("v22", AArch64::Q22)
2356 .Case("v23", AArch64::Q23)
2357 .Case("v24", AArch64::Q24)
2358 .Case("v25", AArch64::Q25)
2359 .Case("v26", AArch64::Q26)
2360 .Case("v27", AArch64::Q27)
2361 .Case("v28", AArch64::Q28)
2362 .Case("v29", AArch64::Q29)
2363 .Case("v30", AArch64::Q30)
2364 .Case("v31", AArch64::Q31)
2365 .Default(0);
2366}
2367
2368/// Returns an optional pair of (#elements, element-width) if Suffix
2369/// is a valid vector kind. Where the number of elements in a vector
2370/// or the vector width is implicit or explicitly unknown (but still a
2371/// valid suffix kind), 0 is used.
2372static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2373 RegKind VectorKind) {
2374 std::pair<int, int> Res = {-1, -1};
2375
2376 switch (VectorKind) {
2377 case RegKind::NeonVector:
2378 Res =
2379 StringSwitch<std::pair<int, int>>(Suffix.lower())
2380 .Case("", {0, 0})
2381 .Case(".1d", {1, 64})
2382 .Case(".1q", {1, 128})
2383 // '.2h' needed for fp16 scalar pairwise reductions
2384 .Case(".2h", {2, 16})
2385 .Case(".2s", {2, 32})
2386 .Case(".2d", {2, 64})
2387 // '.4b' is another special case for the ARMv8.2a dot product
2388 // operand
2389 .Case(".4b", {4, 8})
2390 .Case(".4h", {4, 16})
2391 .Case(".4s", {4, 32})
2392 .Case(".8b", {8, 8})
2393 .Case(".8h", {8, 16})
2394 .Case(".16b", {16, 8})
2395 // Accept the width neutral ones, too, for verbose syntax. If those
2396 // aren't used in the right places, the token operand won't match so
2397 // all will work out.
2398 .Case(".b", {0, 8})
2399 .Case(".h", {0, 16})
2400 .Case(".s", {0, 32})
2401 .Case(".d", {0, 64})
2402 .Default({-1, -1});
2403 break;
2404 case RegKind::SVEPredicateVector:
2405 case RegKind::SVEDataVector:
2406 case RegKind::Matrix:
2407 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2408 .Case("", {0, 0})
2409 .Case(".b", {0, 8})
2410 .Case(".h", {0, 16})
2411 .Case(".s", {0, 32})
2412 .Case(".d", {0, 64})
2413 .Case(".q", {0, 128})
2414 .Default({-1, -1});
2415 break;
2416 default:
2417 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2417)
;
2418 }
2419
2420 if (Res == std::make_pair(-1, -1))
2421 return Optional<std::pair<int, int>>();
2422
2423 return Optional<std::pair<int, int>>(Res);
2424}
2425
2426static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2427 return parseVectorKind(Suffix, VectorKind).hasValue();
2428}
2429
2430static unsigned matchSVEDataVectorRegName(StringRef Name) {
2431 return StringSwitch<unsigned>(Name.lower())
2432 .Case("z0", AArch64::Z0)
2433 .Case("z1", AArch64::Z1)
2434 .Case("z2", AArch64::Z2)
2435 .Case("z3", AArch64::Z3)
2436 .Case("z4", AArch64::Z4)
2437 .Case("z5", AArch64::Z5)
2438 .Case("z6", AArch64::Z6)
2439 .Case("z7", AArch64::Z7)
2440 .Case("z8", AArch64::Z8)
2441 .Case("z9", AArch64::Z9)
2442 .Case("z10", AArch64::Z10)
2443 .Case("z11", AArch64::Z11)
2444 .Case("z12", AArch64::Z12)
2445 .Case("z13", AArch64::Z13)
2446 .Case("z14", AArch64::Z14)
2447 .Case("z15", AArch64::Z15)
2448 .Case("z16", AArch64::Z16)
2449 .Case("z17", AArch64::Z17)
2450 .Case("z18", AArch64::Z18)
2451 .Case("z19", AArch64::Z19)
2452 .Case("z20", AArch64::Z20)
2453 .Case("z21", AArch64::Z21)
2454 .Case("z22", AArch64::Z22)
2455 .Case("z23", AArch64::Z23)
2456 .Case("z24", AArch64::Z24)
2457 .Case("z25", AArch64::Z25)
2458 .Case("z26", AArch64::Z26)
2459 .Case("z27", AArch64::Z27)
2460 .Case("z28", AArch64::Z28)
2461 .Case("z29", AArch64::Z29)
2462 .Case("z30", AArch64::Z30)
2463 .Case("z31", AArch64::Z31)
2464 .Default(0);
2465}
2466
2467static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2468 return StringSwitch<unsigned>(Name.lower())
2469 .Case("p0", AArch64::P0)
2470 .Case("p1", AArch64::P1)
2471 .Case("p2", AArch64::P2)
2472 .Case("p3", AArch64::P3)
2473 .Case("p4", AArch64::P4)
2474 .Case("p5", AArch64::P5)
2475 .Case("p6", AArch64::P6)
2476 .Case("p7", AArch64::P7)
2477 .Case("p8", AArch64::P8)
2478 .Case("p9", AArch64::P9)
2479 .Case("p10", AArch64::P10)
2480 .Case("p11", AArch64::P11)
2481 .Case("p12", AArch64::P12)
2482 .Case("p13", AArch64::P13)
2483 .Case("p14", AArch64::P14)
2484 .Case("p15", AArch64::P15)
2485 .Default(0);
2486}
2487
2488static unsigned matchMatrixTileListRegName(StringRef Name) {
2489 return StringSwitch<unsigned>(Name.lower())
2490 .Case("za0.d", AArch64::ZAD0)
2491 .Case("za1.d", AArch64::ZAD1)
2492 .Case("za2.d", AArch64::ZAD2)
2493 .Case("za3.d", AArch64::ZAD3)
2494 .Case("za4.d", AArch64::ZAD4)
2495 .Case("za5.d", AArch64::ZAD5)
2496 .Case("za6.d", AArch64::ZAD6)
2497 .Case("za7.d", AArch64::ZAD7)
2498 .Case("za0.s", AArch64::ZAS0)
2499 .Case("za1.s", AArch64::ZAS1)
2500 .Case("za2.s", AArch64::ZAS2)
2501 .Case("za3.s", AArch64::ZAS3)
2502 .Case("za0.h", AArch64::ZAH0)
2503 .Case("za1.h", AArch64::ZAH1)
2504 .Case("za0.b", AArch64::ZAB0)
2505 .Default(0);
2506}
2507
2508static unsigned matchMatrixRegName(StringRef Name) {
2509 return StringSwitch<unsigned>(Name.lower())
2510 .Case("za", AArch64::ZA)
2511 .Case("za0.q", AArch64::ZAQ0)
2512 .Case("za1.q", AArch64::ZAQ1)
2513 .Case("za2.q", AArch64::ZAQ2)
2514 .Case("za3.q", AArch64::ZAQ3)
2515 .Case("za4.q", AArch64::ZAQ4)
2516 .Case("za5.q", AArch64::ZAQ5)
2517 .Case("za6.q", AArch64::ZAQ6)
2518 .Case("za7.q", AArch64::ZAQ7)
2519 .Case("za8.q", AArch64::ZAQ8)
2520 .Case("za9.q", AArch64::ZAQ9)
2521 .Case("za10.q", AArch64::ZAQ10)
2522 .Case("za11.q", AArch64::ZAQ11)
2523 .Case("za12.q", AArch64::ZAQ12)
2524 .Case("za13.q", AArch64::ZAQ13)
2525 .Case("za14.q", AArch64::ZAQ14)
2526 .Case("za15.q", AArch64::ZAQ15)
2527 .Case("za0.d", AArch64::ZAD0)
2528 .Case("za1.d", AArch64::ZAD1)
2529 .Case("za2.d", AArch64::ZAD2)
2530 .Case("za3.d", AArch64::ZAD3)
2531 .Case("za4.d", AArch64::ZAD4)
2532 .Case("za5.d", AArch64::ZAD5)
2533 .Case("za6.d", AArch64::ZAD6)
2534 .Case("za7.d", AArch64::ZAD7)
2535 .Case("za0.s", AArch64::ZAS0)
2536 .Case("za1.s", AArch64::ZAS1)
2537 .Case("za2.s", AArch64::ZAS2)
2538 .Case("za3.s", AArch64::ZAS3)
2539 .Case("za0.h", AArch64::ZAH0)
2540 .Case("za1.h", AArch64::ZAH1)
2541 .Case("za0.b", AArch64::ZAB0)
2542 .Case("za0h.q", AArch64::ZAQ0)
2543 .Case("za1h.q", AArch64::ZAQ1)
2544 .Case("za2h.q", AArch64::ZAQ2)
2545 .Case("za3h.q", AArch64::ZAQ3)
2546 .Case("za4h.q", AArch64::ZAQ4)
2547 .Case("za5h.q", AArch64::ZAQ5)
2548 .Case("za6h.q", AArch64::ZAQ6)
2549 .Case("za7h.q", AArch64::ZAQ7)
2550 .Case("za8h.q", AArch64::ZAQ8)
2551 .Case("za9h.q", AArch64::ZAQ9)
2552 .Case("za10h.q", AArch64::ZAQ10)
2553 .Case("za11h.q", AArch64::ZAQ11)
2554 .Case("za12h.q", AArch64::ZAQ12)
2555 .Case("za13h.q", AArch64::ZAQ13)
2556 .Case("za14h.q", AArch64::ZAQ14)
2557 .Case("za15h.q", AArch64::ZAQ15)
2558 .Case("za0h.d", AArch64::ZAD0)
2559 .Case("za1h.d", AArch64::ZAD1)
2560 .Case("za2h.d", AArch64::ZAD2)
2561 .Case("za3h.d", AArch64::ZAD3)
2562 .Case("za4h.d", AArch64::ZAD4)
2563 .Case("za5h.d", AArch64::ZAD5)
2564 .Case("za6h.d", AArch64::ZAD6)
2565 .Case("za7h.d", AArch64::ZAD7)
2566 .Case("za0h.s", AArch64::ZAS0)
2567 .Case("za1h.s", AArch64::ZAS1)
2568 .Case("za2h.s", AArch64::ZAS2)
2569 .Case("za3h.s", AArch64::ZAS3)
2570 .Case("za0h.h", AArch64::ZAH0)
2571 .Case("za1h.h", AArch64::ZAH1)
2572 .Case("za0h.b", AArch64::ZAB0)
2573 .Case("za0v.q", AArch64::ZAQ0)
2574 .Case("za1v.q", AArch64::ZAQ1)
2575 .Case("za2v.q", AArch64::ZAQ2)
2576 .Case("za3v.q", AArch64::ZAQ3)
2577 .Case("za4v.q", AArch64::ZAQ4)
2578 .Case("za5v.q", AArch64::ZAQ5)
2579 .Case("za6v.q", AArch64::ZAQ6)
2580 .Case("za7v.q", AArch64::ZAQ7)
2581 .Case("za8v.q", AArch64::ZAQ8)
2582 .Case("za9v.q", AArch64::ZAQ9)
2583 .Case("za10v.q", AArch64::ZAQ10)
2584 .Case("za11v.q", AArch64::ZAQ11)
2585 .Case("za12v.q", AArch64::ZAQ12)
2586 .Case("za13v.q", AArch64::ZAQ13)
2587 .Case("za14v.q", AArch64::ZAQ14)
2588 .Case("za15v.q", AArch64::ZAQ15)
2589 .Case("za0v.d", AArch64::ZAD0)
2590 .Case("za1v.d", AArch64::ZAD1)
2591 .Case("za2v.d", AArch64::ZAD2)
2592 .Case("za3v.d", AArch64::ZAD3)
2593 .Case("za4v.d", AArch64::ZAD4)
2594 .Case("za5v.d", AArch64::ZAD5)
2595 .Case("za6v.d", AArch64::ZAD6)
2596 .Case("za7v.d", AArch64::ZAD7)
2597 .Case("za0v.s", AArch64::ZAS0)
2598 .Case("za1v.s", AArch64::ZAS1)
2599 .Case("za2v.s", AArch64::ZAS2)
2600 .Case("za3v.s", AArch64::ZAS3)
2601 .Case("za0v.h", AArch64::ZAH0)
2602 .Case("za1v.h", AArch64::ZAH1)
2603 .Case("za0v.b", AArch64::ZAB0)
2604 .Default(0);
2605}
2606
2607bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2608 SMLoc &EndLoc) {
2609 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
29: Calling 'AArch64AsmParser::tryParseRegister'
35: Returning from 'AArch64AsmParser::tryParseRegister'
36: Returning without writing to 'RegNo'
2610}
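
The three path notes above are the core of this report: on the no-match path neither tryParseScalarRegister nor tryParseRegister writes RegNo, so a caller that reads RegNo without checking the returned status consumes an uninitialized value, which is the garbage value the analyzer flags downstream. A reduced sketch of the hazard, with initialization of the out-parameter shown as one defensive hedge (the names are illustrative, not the upstream fix):

    #include <cstdio>

    enum Result { Success, NoMatch };

    // Mirrors tryParseScalarRegister: writes Reg only on success.
    static Result tryParse(bool TokenIsIdentifier, unsigned &Reg) {
      if (!TokenIsIdentifier)
        return NoMatch;          // Reg is left untouched on this path.
      Reg = 7;
      return Success;
    }

    int main() {
      unsigned RegNo = 0;        // Initializing here removes the garbage read;
                                 // without "= 0", the comparison below would read
                                 // an indeterminate value on the NoMatch path,
                                 // which is the pattern the analyzer flags.
      if (tryParse(/*TokenIsIdentifier=*/false, RegNo) != Success)
        return 1;                // Checking the status also avoids the read.
      return RegNo < 32 ? 0 : 1;
    }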
2611
2612OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2613 SMLoc &StartLoc,
2614 SMLoc &EndLoc) {
2615 StartLoc = getLoc();
2616 auto Res = tryParseScalarRegister(RegNo);
30: Calling 'AArch64AsmParser::tryParseScalarRegister'
33: Returning from 'AArch64AsmParser::tryParseScalarRegister'
2617 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2618 return Res;
34: Returning without writing to 'RegNo'
2619}
2620
2621// Matches a register name or register alias previously defined by '.req'
2622unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2623 RegKind Kind) {
2624 unsigned RegNum = 0;
2625 if ((RegNum = matchSVEDataVectorRegName(Name)))
2626 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2627
2628 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2629 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2630
2631 if ((RegNum = MatchNeonVectorRegName(Name)))
2632 return Kind == RegKind::NeonVector ? RegNum : 0;
2633
2634 if ((RegNum = matchMatrixRegName(Name)))
2635 return Kind == RegKind::Matrix ? RegNum : 0;
2636
2637 // The parsed register must be of RegKind Scalar
2638 if ((RegNum = MatchRegisterName(Name)))
2639 return Kind == RegKind::Scalar ? RegNum : 0;
2640
2641 if (!RegNum) {
2642 // Handle a few common aliases of registers.
2643 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2644 .Case("fp", AArch64::FP)
2645 .Case("lr", AArch64::LR)
2646 .Case("x31", AArch64::XZR)
2647 .Case("w31", AArch64::WZR)
2648 .Default(0))
2649 return Kind == RegKind::Scalar ? RegNum : 0;
2650
2651 // Check for aliases registered via .req. Canonicalize to lower case.
2652 // That's more consistent since register names are case insensitive, and
2653 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2654 auto Entry = RegisterReqs.find(Name.lower());
2655 if (Entry == RegisterReqs.end())
2656 return 0;
2657
2658 // set RegNum if the match is the right kind of register
2659 if (Kind == Entry->getValue().first)
2660 RegNum = Entry->getValue().second;
2661 }
2662 return RegNum;
2663}
2664
2665/// tryParseScalarRegister - Try to parse a register name. The token must be an
2666/// Identifier when called, and if it is a register name the token is eaten and
2667/// the register is added to the operand list.
2668OperandMatchResultTy
2669AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2670 const AsmToken &Tok = getTok();
2671 if (Tok.isNot(AsmToken::Identifier))
31: Taking true branch
2672 return MatchOperand_NoMatch;
32: Returning without writing to 'Reg'
2673
2674 std::string lowerCase = Tok.getString().lower();
2675 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2676 if (Reg == 0)
2677 return MatchOperand_NoMatch;
2678
2679 RegNum = Reg;
2680 Lex(); // Eat identifier token.
2681 return MatchOperand_Success;
2682}
2683
2684/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2685OperandMatchResultTy
2686AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2687 SMLoc S = getLoc();
2688
2689 if (getTok().isNot(AsmToken::Identifier)) {
2690 Error(S, "Expected cN operand where 0 <= N <= 15");
2691 return MatchOperand_ParseFail;
2692 }
2693
2694 StringRef Tok = getTok().getIdentifier();
2695 if (Tok[0] != 'c' && Tok[0] != 'C') {
2696 Error(S, "Expected cN operand where 0 <= N <= 15");
2697 return MatchOperand_ParseFail;
2698 }
2699
2700 uint32_t CRNum;
2701 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2702 if (BadNum || CRNum > 15) {
2703 Error(S, "Expected cN operand where 0 <= N <= 15");
2704 return MatchOperand_ParseFail;
2705 }
2706
2707 Lex(); // Eat identifier token.
2708 Operands.push_back(
2709 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2710 return MatchOperand_Success;
2711}
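
The cN validation above reduces to: require a leading 'c' or 'C', parse the remainder as base-10, and reject values greater than 15. A standalone sketch under those assumptions (std::stoul stands in for StringRef::getAsInteger, which reports failure instead of throwing):

    #include <cctype>
    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Returns the CR number for a token like "c7" or "C15", or -1 on bad input.
    static int parseSysCR(const std::string &Tok) {
      if (Tok.size() < 2 || (Tok[0] != 'c' && Tok[0] != 'C'))
        return -1;
      // Reject tokens like "c1x": every remaining character must be a digit.
      for (size_t I = 1; I < Tok.size(); ++I)
        if (!std::isdigit(static_cast<unsigned char>(Tok[I])))
          return -1;
      try {
        unsigned long N = std::stoul(Tok.substr(1), nullptr, 10);
        return N > 15 ? -1 : static_cast<int>(N);
      } catch (const std::exception &) {
        return -1;               // overflow on absurdly long digit strings
      }
    }

    int main() {
      std::printf("%d %d %d\n", parseSysCR("c7"), parseSysCR("C15"),
                  parseSysCR("c16"));  // prints: 7 15 -1
    }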
2712
2713/// tryParsePrefetch - Try to parse a prefetch operand.
2714template <bool IsSVEPrefetch>
2715OperandMatchResultTy
2716AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2717 SMLoc S = getLoc();
2718 const AsmToken &Tok = getTok();
2719
2720 auto LookupByName = [](StringRef N) {
2721 if (IsSVEPrefetch) {
2722 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2723 return Optional<unsigned>(Res->Encoding);
2724 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2725 return Optional<unsigned>(Res->Encoding);
2726 return Optional<unsigned>();
2727 };
2728
2729 auto LookupByEncoding = [](unsigned E) {
2730 if (IsSVEPrefetch) {
2731 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2732 return Optional<StringRef>(Res->Name);
2733 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2734 return Optional<StringRef>(Res->Name);
2735 return Optional<StringRef>();
2736 };
2737 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2738
2739 // Either an identifier for named values or a 5-bit immediate.
2740 // Eat optional hash.
2741 if (parseOptionalToken(AsmToken::Hash) ||
2742 Tok.is(AsmToken::Integer)) {
2743 const MCExpr *ImmVal;
2744 if (getParser().parseExpression(ImmVal))
2745 return MatchOperand_ParseFail;
2746
2747 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2748 if (!MCE) {
2749 TokError("immediate value expected for prefetch operand");
2750 return MatchOperand_ParseFail;
2751 }
2752 unsigned prfop = MCE->getValue();
2753 if (prfop > MaxVal) {
2754 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2755 "] expected");
2756 return MatchOperand_ParseFail;
2757 }
2758
2759 auto PRFM = LookupByEncoding(MCE->getValue());
2760 Operands.push_back(AArch64Operand::CreatePrefetch(
2761 prfop, PRFM.getValueOr(""), S, getContext()));
2762 return MatchOperand_Success;
2763 }
2764
2765 if (Tok.isNot(AsmToken::Identifier)) {
2766 TokError("prefetch hint expected");
2767 return MatchOperand_ParseFail;
2768 }
2769
2770 auto PRFM = LookupByName(Tok.getString());
2771 if (!PRFM) {
2772 TokError("prefetch hint expected");
2773 return MatchOperand_ParseFail;
2774 }
2775
2776 Operands.push_back(AArch64Operand::CreatePrefetch(
2777 *PRFM, Tok.getString(), S, getContext()));
2778 Lex(); // Eat identifier token.
2779 return MatchOperand_Success;
2780}
2781
2782/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2783OperandMatchResultTy
2784AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2785 SMLoc S = getLoc();
2786 const AsmToken &Tok = getTok();
2787 if (Tok.isNot(AsmToken::Identifier)) {
2788 TokError("invalid operand for instruction");
2789 return MatchOperand_ParseFail;
2790 }
2791
2792 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2793 if (!PSB) {
2794 TokError("invalid operand for instruction");
2795 return MatchOperand_ParseFail;
2796 }
2797
2798 Operands.push_back(AArch64Operand::CreatePSBHint(
2799 PSB->Encoding, Tok.getString(), S, getContext()));
2800 Lex(); // Eat identifier token.
2801 return MatchOperand_Success;
2802}
2803
2804/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2805OperandMatchResultTy
2806AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2807 SMLoc S = getLoc();
2808 const AsmToken &Tok = getTok();
2809 if (Tok.isNot(AsmToken::Identifier)) {
2810 TokError("invalid operand for instruction");
2811 return MatchOperand_ParseFail;
2812 }
2813
2814 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2815 if (!BTI) {
2816 TokError("invalid operand for instruction");
2817 return MatchOperand_ParseFail;
2818 }
2819
2820 Operands.push_back(AArch64Operand::CreateBTIHint(
2821 BTI->Encoding, Tok.getString(), S, getContext()));
2822 Lex(); // Eat identifier token.
2823 return MatchOperand_Success;
2824}
2825
2826/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2827/// instruction.
2828OperandMatchResultTy
2829AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2830 SMLoc S = getLoc();
2831 const MCExpr *Expr = nullptr;
2832
2833 if (getTok().is(AsmToken::Hash)) {
2834 Lex(); // Eat hash token.
2835 }
2836
2837 if (parseSymbolicImmVal(Expr))
2838 return MatchOperand_ParseFail;
2839
2840 AArch64MCExpr::VariantKind ELFRefKind;
2841 MCSymbolRefExpr::VariantKind DarwinRefKind;
2842 int64_t Addend;
2843 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2844 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2845 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2846 // No modifier was specified at all; this is the syntax for an ELF basic
2847 // ADRP relocation (unfortunately).
2848 Expr =
2849 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2850 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2851 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2852 Addend != 0) {
2853 Error(S, "gotpage label reference not allowed an addend");
2854 return MatchOperand_ParseFail;
2855 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2856 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2857 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2858 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2859 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2860 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
2861 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2862 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2863 // The operand must be an @page or @gotpage qualified symbolref.
2864 Error(S, "page or gotpage label reference expected");
2865 return MatchOperand_ParseFail;
2866 }
2867 }
2868
2869 // We have either a label reference possibly with addend or an immediate. The
2870 // addend is a raw value here. The linker will adjust it to only reference the
2871 // page.
2872 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2873 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2874
2875 return MatchOperand_Success;
2876}
2877
2878/// tryParseAdrLabel - Parse and validate a source label for the ADR
2879/// instruction.
2880OperandMatchResultTy
2881AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2882 SMLoc S = getLoc();
2883 const MCExpr *Expr = nullptr;
2884
2885 // Leave anything with a bracket to the default for SVE
2886 if (getTok().is(AsmToken::LBrac))
2887 return MatchOperand_NoMatch;
2888
2889 if (getTok().is(AsmToken::Hash))
2890 Lex(); // Eat hash token.
2891
2892 if (parseSymbolicImmVal(Expr))
2893 return MatchOperand_ParseFail;
2894
2895 AArch64MCExpr::VariantKind ELFRefKind;
2896 MCSymbolRefExpr::VariantKind DarwinRefKind;
2897 int64_t Addend;
2898 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2899 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2900 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2901 // No modifier was specified at all; this is the syntax for an ELF basic
2902 // ADR relocation (unfortunately).
2903 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2904 } else {
2905 Error(S, "unexpected adr label");
2906 return MatchOperand_ParseFail;
2907 }
2908 }
2909
2910 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2911 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2912 return MatchOperand_Success;
2913}
2914
2915/// tryParseFPImm - A floating point immediate expression operand.
2916template<bool AddFPZeroAsLiteral>
2917OperandMatchResultTy
2918AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2919 SMLoc S = getLoc();
2920
2921 bool Hash = parseOptionalToken(AsmToken::Hash);
2922
2923 // Handle negation, as that still comes through as a separate token.
2924 bool isNegative = parseOptionalToken(AsmToken::Minus);
2925
2926 const AsmToken &Tok = getTok();
2927 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2928 if (!Hash)
2929 return MatchOperand_NoMatch;
2930 TokError("invalid floating point immediate");
2931 return MatchOperand_ParseFail;
2932 }
2933
2934 // Parse hexadecimal representation.
2935 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2936 if (Tok.getIntVal() > 255 || isNegative) {
2937 TokError("encoded floating point value out of range");
2938 return MatchOperand_ParseFail;
2939 }
2940
2941 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2942 Operands.push_back(
2943 AArch64Operand::CreateFPImm(F, true, S, getContext()));
2944 } else {
2945 // Parse FP representation.
2946 APFloat RealVal(APFloat::IEEEdouble());
2947 auto StatusOrErr =
2948 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2949 if (errorToBool(StatusOrErr.takeError())) {
2950 TokError("invalid floating point representation");
2951 return MatchOperand_ParseFail;
2952 }
2953
2954 if (isNegative)
2955 RealVal.changeSign();
2956
2957 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2958 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
2959 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
2960 } else
2961 Operands.push_back(AArch64Operand::CreateFPImm(
2962 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2963 }
2964
2965 Lex(); // Eat the token.
2966
2967 return MatchOperand_Success;
2968}
2969
2970/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2971/// a shift suffix, for example '#1, lsl #12'.
2972OperandMatchResultTy
2973AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2974 SMLoc S = getLoc();
2975
2976 if (getTok().is(AsmToken::Hash))
2977 Lex(); // Eat '#'
2978 else if (getTok().isNot(AsmToken::Integer))
2979 // Operand should start with '#' or be an integer; otherwise it is no match.
2980 return MatchOperand_NoMatch;
2981
2982 const MCExpr *Imm = nullptr;
2983 if (parseSymbolicImmVal(Imm))
2984 return MatchOperand_ParseFail;
2985 else if (getTok().isNot(AsmToken::Comma)) {
2986 Operands.push_back(
2987 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
2988 return MatchOperand_Success;
2989 }
2990
2991 // Eat ','
2992 Lex();
2993
2994 // The optional operand must be "lsl #N" where N is non-negative.
2995 if (!getTok().is(AsmToken::Identifier) ||
2996 !getTok().getIdentifier().equals_insensitive("lsl")) {
2997 Error(getLoc(), "only 'lsl #+N' valid after immediate");
2998 return MatchOperand_ParseFail;
2999 }
3000
3001 // Eat 'lsl'
3002 Lex();
3003
3004 parseOptionalToken(AsmToken::Hash);
3005
3006 if (getTok().isNot(AsmToken::Integer)) {
3007 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3008 return MatchOperand_ParseFail;
3009 }
3010
3011 int64_t ShiftAmount = getTok().getIntVal();
3012
3013 if (ShiftAmount < 0) {
3014 Error(getLoc(), "positive shift amount required");
3015 return MatchOperand_ParseFail;
3016 }
3017 Lex(); // Eat the number
3018
3019 // Just in case the optional lsl #0 is used for immediates other than zero.
3020 if (ShiftAmount == 0 && Imm != nullptr) {
3021 Operands.push_back(
3022 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3023 return MatchOperand_Success;
3024 }
3025
3026 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3027 getLoc(), getContext()));
3028 return MatchOperand_Success;
3029}
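
For intuition, the "#imm, lsl #N" form accepted above denotes the value imm << N; the parser keeps the pair separate so the matcher can check the shift against what the instruction encodes. A toy model of that reading (illustrative only, not the operand class itself):

    #include <cassert>
    #include <cstdint>

    // Toy model of a "#imm, lsl #N" operand: the parsed pair is (Imm, Shift),
    // and the effective value the instruction computes with is Imm << Shift.
    struct ShiftedImm {
      int64_t Imm;
      unsigned Shift;            // non-negative; "lsl #0" folds to a plain imm
      int64_t effective() const { return Imm << Shift; }
    };

    int main() {
      ShiftedImm Op{1, 12};      // "#1, lsl #12"
      assert(Op.effective() == 4096);
      ShiftedImm Plain{5, 0};    // "#5, lsl #0" behaves like "#5"
      assert(Plain.effective() == 5);
    }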
3030
3031/// parseCondCodeString - Parse a Condition Code string.
3032AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
3033 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3034 .Case("eq", AArch64CC::EQ)
3035 .Case("ne", AArch64CC::NE)
3036 .Case("cs", AArch64CC::HS)
3037 .Case("hs", AArch64CC::HS)
3038 .Case("cc", AArch64CC::LO)
3039 .Case("lo", AArch64CC::LO)
3040 .Case("mi", AArch64CC::MI)
3041 .Case("pl", AArch64CC::PL)
3042 .Case("vs", AArch64CC::VS)
3043 .Case("vc", AArch64CC::VC)
3044 .Case("hi", AArch64CC::HI)
3045 .Case("ls", AArch64CC::LS)
3046 .Case("ge", AArch64CC::GE)
3047 .Case("lt", AArch64CC::LT)
3048 .Case("gt", AArch64CC::GT)
3049 .Case("le", AArch64CC::LE)
3050 .Case("al", AArch64CC::AL)
3051 .Case("nv", AArch64CC::NV)
3052 .Default(AArch64CC::Invalid);
3053
3054 if (CC == AArch64CC::Invalid &&
3055 getSTI().getFeatureBits()[AArch64::FeatureSVE])
3056 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3057 .Case("none", AArch64CC::EQ)
3058 .Case("any", AArch64CC::NE)
3059 .Case("nlast", AArch64CC::HS)
3060 .Case("last", AArch64CC::LO)
3061 .Case("first", AArch64CC::MI)
3062 .Case("nfrst", AArch64CC::PL)
3063 .Case("pmore", AArch64CC::HI)
3064 .Case("plast", AArch64CC::LS)
3065 .Case("tcont", AArch64CC::GE)
3066 .Case("tstop", AArch64CC::LT)
3067 .Default(AArch64CC::Invalid);
3068
3069 return CC;
3070}
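
Note that "cs"/"hs" and "cc"/"lo" above are alternative spellings that resolve to the same codes, and the SVE names are a second lookup onto the same enum. A minimal check of the alias behavior (a std::map again stands in for StringSwitch, with a two-code subset):

    #include <cassert>
    #include <map>
    #include <string>

    // Illustrative subset: both spellings resolve to one canonical code,
    // mirroring the StringSwitch above ("cs"/"hs" -> HS, "cc"/"lo" -> LO).
    enum Cond { HS, LO, Invalid };

    static Cond parseCond(const std::string &S) {
      static const std::map<std::string, Cond> M = {
          {"cs", HS}, {"hs", HS}, {"cc", LO}, {"lo", LO}};
      auto It = M.find(S);
      return It == M.end() ? Invalid : It->second;
    }

    int main() {
      assert(parseCond("cs") == parseCond("hs"));
      assert(parseCond("cc") == parseCond("lo"));
      assert(parseCond("xx") == Invalid);
    }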
3071
3072/// parseCondCode - Parse a Condition Code operand.
3073bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3074 bool invertCondCode) {
3075 SMLoc S = getLoc();
3076 const AsmToken &Tok = getTok();
3077 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3078
3079 StringRef Cond = Tok.getString();
3080 AArch64CC::CondCode CC = parseCondCodeString(Cond);
3081 if (CC == AArch64CC::Invalid)
3082 return TokError("invalid condition code");
3083 Lex(); // Eat identifier token.
3084
3085 if (invertCondCode) {
3086 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3087 return TokError("condition codes AL and NV are invalid for this instruction");
3088 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3089 }
3090
3091 Operands.push_back(
3092 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3093 return false;
3094}
3095
3096OperandMatchResultTy
3097AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3098 const AsmToken &Tok = getTok();
3099 SMLoc S = getLoc();
3100
3101 if (Tok.isNot(AsmToken::Identifier)) {
3102 TokError("invalid operand for instruction");
3103 return MatchOperand_ParseFail;
3104 }
3105
3106 unsigned PStateImm = -1;
3107 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3108 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3109 PStateImm = SVCR->Encoding;
3110
3111 Operands.push_back(
3112 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3113 Lex(); // Eat identifier token.
3114 return MatchOperand_Success;
3115}
3116
3117OperandMatchResultTy
3118AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3119 const AsmToken &Tok = getTok();
3120 SMLoc S = getLoc();
3121
3122 StringRef Name = Tok.getString();
3123
3124 if (Name.equals_insensitive("za")) {
3125 Lex(); // eat "za"
3126 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3127 AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
3128 getContext()));
3129 if (getLexer().is(AsmToken::LBrac)) {
3130 // There's no comma after a matrix operand, so we can parse the next
3131 // operand immediately.
3132 if (parseOperand(Operands, false, false))
3133 return MatchOperand_NoMatch;
3134 }
3135 return MatchOperand_Success;
3136 }
3137
3138 // Try to parse matrix register.
3139 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3140 if (!Reg)
3141 return MatchOperand_NoMatch;
3142
3143 size_t DotPosition = Name.find('.');
3144 assert(DotPosition != StringRef::npos && "Unexpected register");
3145
3146 StringRef Head = Name.take_front(DotPosition);
3147 StringRef Tail = Name.drop_front(DotPosition);
3148 StringRef RowOrColumn = Head.take_back();
3149
3150 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
3151 .Case("h", MatrixKind::Row)
3152 .Case("v", MatrixKind::Col)
3153 .Default(MatrixKind::Tile);
3154
3155 // Next up, parsing the suffix
3156 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3157 if (!KindRes) {
3158 TokError("Expected the register to be followed by element width suffix");
3159 return MatchOperand_ParseFail;
3160 }
3161 unsigned ElementWidth = KindRes->second;
3162
3163 Lex();
3164
3165 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3166 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3167
3168 if (getLexer().is(AsmToken::LBrac)) {
3169 // There's no comma after a matrix operand, so we can parse the next
3170 // operand immediately.
3171 if (parseOperand(Operands, false, false))
3172 return MatchOperand_NoMatch;
3173 }
3174 return MatchOperand_Success;
3175}
3176
3177/// tryParseOptionalShiftExtend - Some operands take an optional shift or
3178/// extend argument. Parse them if present.
3179OperandMatchResultTy
3180AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3181 const AsmToken &Tok = getTok();
3182 std::string LowerID = Tok.getString().lower();
3183 AArch64_AM::ShiftExtendType ShOp =
3184 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3185 .Case("lsl", AArch64_AM::LSL)
3186 .Case("lsr", AArch64_AM::LSR)
3187 .Case("asr", AArch64_AM::ASR)
3188 .Case("ror", AArch64_AM::ROR)
3189 .Case("msl", AArch64_AM::MSL)
3190 .Case("uxtb", AArch64_AM::UXTB)
3191 .Case("uxth", AArch64_AM::UXTH)
3192 .Case("uxtw", AArch64_AM::UXTW)
3193 .Case("uxtx", AArch64_AM::UXTX)
3194 .Case("sxtb", AArch64_AM::SXTB)
3195 .Case("sxth", AArch64_AM::SXTH)
3196 .Case("sxtw", AArch64_AM::SXTW)
3197 .Case("sxtx", AArch64_AM::SXTX)
3198 .Default(AArch64_AM::InvalidShiftExtend);
3199
3200 if (ShOp == AArch64_AM::InvalidShiftExtend)
3201 return MatchOperand_NoMatch;
3202
3203 SMLoc S = Tok.getLoc();
3204 Lex();
3205
3206 bool Hash = parseOptionalToken(AsmToken::Hash);
3207
3208 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3209 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3210 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3211 ShOp == AArch64_AM::MSL) {
3212 // We expect a number here.
3213 TokError("expected #imm after shift specifier");
3214 return MatchOperand_ParseFail;
3215 }
3216
3217 // "extend" type operations don't need an immediate, #0 is implicit.
3218 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3219 Operands.push_back(
3220 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3221 return MatchOperand_Success;
3222 }
3223
3224 // Make sure we do actually have a number, identifier or a parenthesized
3225 // expression.
3226 SMLoc E = getLoc();
3227 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3228 !getTok().is(AsmToken::Identifier)) {
3229 Error(E, "expected integer shift amount");
3230 return MatchOperand_ParseFail;
3231 }
3232
3233 const MCExpr *ImmVal;
3234 if (getParser().parseExpression(ImmVal))
3235 return MatchOperand_ParseFail;
3236
3237 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3238 if (!MCE) {
3239 Error(E, "expected constant '#imm' after shift specifier");
3240 return MatchOperand_ParseFail;
3241 }
3242
3243 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3244 Operands.push_back(AArch64Operand::CreateShiftExtend(
3245 ShOp, MCE->getValue(), true, S, E, getContext()));
3246 return MatchOperand_Success;
3247}
3248
3249static const struct Extension {
3250 const char *Name;
3251 const FeatureBitset Features;
3252} ExtensionMap[] = {
3253 {"crc", {AArch64::FeatureCRC}},
3254 {"sm4", {AArch64::FeatureSM4}},
3255 {"sha3", {AArch64::FeatureSHA3}},
3256 {"sha2", {AArch64::FeatureSHA2}},
3257 {"aes", {AArch64::FeatureAES}},
3258 {"crypto", {AArch64::FeatureCrypto}},
3259 {"fp", {AArch64::FeatureFPARMv8}},
3260 {"simd", {AArch64::FeatureNEON}},
3261 {"ras", {AArch64::FeatureRAS}},
3262 {"lse", {AArch64::FeatureLSE}},
3263 {"predres", {AArch64::FeaturePredRes}},
3264 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3265 {"mte", {AArch64::FeatureMTE}},
3266 {"memtag", {AArch64::FeatureMTE}},
3267 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3268 {"pan", {AArch64::FeaturePAN}},
3269 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3270 {"ccpp", {AArch64::FeatureCCPP}},
3271 {"rcpc", {AArch64::FeatureRCPC}},
3272 {"rng", {AArch64::FeatureRandGen}},
3273 {"sve", {AArch64::FeatureSVE}},
3274 {"sve2", {AArch64::FeatureSVE2}},
3275 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3276 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3277 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3278 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3279 {"ls64", {AArch64::FeatureLS64}},
3280 {"xs", {AArch64::FeatureXS}},
3281 {"pauth", {AArch64::FeaturePAuth}},
3282 {"flagm", {AArch64::FeatureFlagM}},
3283 {"rme", {AArch64::FeatureRME}},
3284 {"sme", {AArch64::FeatureSME}},
3285 {"sme-f64", {AArch64::FeatureSMEF64}},
3286 {"sme-i64", {AArch64::FeatureSMEI64}},
3287 // FIXME: Unsupported extensions
3288 {"lor", {}},
3289 {"rdma", {}},
3290 {"profile", {}},
3291};
3292
3293static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3294 if (FBS[AArch64::HasV8_0aOps])
3295 Str += "ARMv8a";
3296 if (FBS[AArch64::HasV8_1aOps])
3297 Str += "ARMv8.1a";
3298 else if (FBS[AArch64::HasV8_2aOps])
3299 Str += "ARMv8.2a";
3300 else if (FBS[AArch64::HasV8_3aOps])
3301 Str += "ARMv8.3a";
3302 else if (FBS[AArch64::HasV8_4aOps])
3303 Str += "ARMv8.4a";
3304 else if (FBS[AArch64::HasV8_5aOps])
3305 Str += "ARMv8.5a";
3306 else if (FBS[AArch64::HasV8_6aOps])
3307 Str += "ARMv8.6a";
3308 else if (FBS[AArch64::HasV8_7aOps])
3309 Str += "ARMv8.7a";
3310 else if (FBS[AArch64::HasV8_8aOps])
3311 Str += "ARMv8.8a";
3312 else if (FBS[AArch64::HasV9_0aOps])
3313 Str += "ARMv9-a";
3314 else if (FBS[AArch64::HasV9_1aOps])
3315 Str += "ARMv9.1a";
3316 else if (FBS[AArch64::HasV9_2aOps])
3317 Str += "ARMv9.2a";
3318 else if (FBS[AArch64::HasV9_3aOps])
3319 Str += "ARMv9.3a";
3320 else if (FBS[AArch64::HasV8_0rOps])
3321 Str += "ARMv8r";
3322 else {
3323 SmallVector<std::string, 2> ExtMatches;
3324 for (const auto& Ext : ExtensionMap) {
3325 // Use & in case multiple features are enabled
3326 if ((FBS & Ext.Features) != FeatureBitset())
3327 ExtMatches.push_back(Ext.Name);
3328 }
3329 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3330 }
3331}
3332
3333void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3334 SMLoc S) {
3335 const uint16_t Op2 = Encoding & 7;
3336 const uint16_t Cm = (Encoding & 0x78) >> 3;
3337 const uint16_t Cn = (Encoding & 0x780) >> 7;
3338 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3339
3340 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3341
3342 Operands.push_back(
3343 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3344 Operands.push_back(
3345 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3346 Operands.push_back(
3347 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3348 Expr = MCConstantExpr::create(Op2, getContext());
3349 Operands.push_back(
3350 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3351}
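
The shifts and masks above imply the packed SYS-alias layout op1[13:11] Cn[10:7] Cm[6:3] op2[2:0]. A round-trip check of that layout (the sample field values are arbitrary, chosen only to exercise all four fields):

    #include <cassert>
    #include <cstdint>

    struct SysFields { uint16_t Op1, Cn, Cm, Op2; };

    // Same masks and shifts as createSysAlias:
    // op1[13:11] Cn[10:7] Cm[6:3] op2[2:0].
    static SysFields unpack(uint16_t Encoding) {
      return {static_cast<uint16_t>((Encoding & 0x3800) >> 11),
              static_cast<uint16_t>((Encoding & 0x780) >> 7),
              static_cast<uint16_t>((Encoding & 0x78) >> 3),
              static_cast<uint16_t>(Encoding & 7)};
    }

    static uint16_t pack(SysFields F) {
      return static_cast<uint16_t>((F.Op1 << 11) | (F.Cn << 7) |
                                   (F.Cm << 3) | F.Op2);
    }

    int main() {
      SysFields F{3, 7, 4, 1};   // arbitrary in-range field values
      SysFields G = unpack(pack(F));
      assert(G.Op1 == 3 && G.Cn == 7 && G.Cm == 4 && G.Op2 == 1);
    }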
3352
3353/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3354/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3355bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3356 OperandVector &Operands) {
3357 if (Name.contains('.'))
3358 return TokError("invalid operand");
3359
3360 Mnemonic = Name;
3361 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3362
3363 const AsmToken &Tok = getTok();
3364 StringRef Op = Tok.getString();
3365 SMLoc S = Tok.getLoc();
3366
3367 if (Mnemonic == "ic") {
3368 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3369 if (!IC)
3370 return TokError("invalid operand for IC instruction");
3371 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3372 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3373 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3374 return TokError(Str);
3375 }
3376 createSysAlias(IC->Encoding, Operands, S);
3377 } else if (Mnemonic == "dc") {
3378 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3379 if (!DC)
3380 return TokError("invalid operand for DC instruction");
3381 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3382 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3383 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3384 return TokError(Str);
3385 }
3386 createSysAlias(DC->Encoding, Operands, S);
3387 } else if (Mnemonic == "at") {
3388 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3389 if (!AT)
3390 return TokError("invalid operand for AT instruction");
3391 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3392 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3393 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3394 return TokError(Str);
3395 }
3396 createSysAlias(AT->Encoding, Operands, S);
3397 } else if (Mnemonic == "tlbi") {
3398 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3399 if (!TLBI)
3400 return TokError("invalid operand for TLBI instruction");
3401 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3402 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3403 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3404 return TokError(Str);
3405 }
3406 createSysAlias(TLBI->Encoding, Operands, S);
3407 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
3408 const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
3409 if (!PRCTX)
3410 return TokError("invalid operand for prediction restriction instruction");
3411 else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
3412 std::string Str(
3413 Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
3414 setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
3415 return TokError(Str);
3416 }
3417 uint16_t PRCTX_Op2 =
3418 Mnemonic == "cfp" ? 4 :
3419 Mnemonic == "dvp" ? 5 :
3420 Mnemonic == "cpp" ? 7 :
3421 0;
3422 assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
3423 createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
3424 }
3425
3426 Lex(); // Eat operand.
3427
3428 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3429 bool HasRegister = false;
3430
3431 // Check for the optional register operand.
3432 if (parseOptionalToken(AsmToken::Comma)) {
3433 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3434 return TokError("expected register operand");
3435 HasRegister = true;
3436 }
3437
3438 if (ExpectRegister && !HasRegister)
3439 return TokError("specified " + Mnemonic + " op requires a register");
3440 else if (!ExpectRegister && HasRegister)
3441 return TokError("specified " + Mnemonic + " op does not use a register");
3442
3443 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3444 return true;
3445
3446 return false;
3447}
3448
3449OperandMatchResultTy
3450AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3451 MCAsmParser &Parser = getParser();
3452 const AsmToken &Tok = getTok();
3453
3454 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3455 TokError("'csync' operand expected");
3456 return MatchOperand_ParseFail;
3457 } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3458 // Immediate operand.
3459 const MCExpr *ImmVal;
3460 SMLoc ExprLoc = getLoc();
3461 AsmToken IntTok = Tok;
3462 if (getParser().parseExpression(ImmVal))
3463 return MatchOperand_ParseFail;
3464 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3465 if (!MCE) {
3466 Error(ExprLoc, "immediate value expected for barrier operand");
3467 return MatchOperand_ParseFail;
3468 }
3469 int64_t Value = MCE->getValue();
3470 if (Mnemonic == "dsb" && Value > 15) {
3471 // This case is a 'no match' here, but it might be matched by the nXS
3472 // variant. We deliberately do not unlex the optional '#', as it is not
3473 // needed to characterize an integer immediate.
3474 Parser.getLexer().UnLex(IntTok);
3475 return MatchOperand_NoMatch;
3476 }
3477 if (Value < 0 || Value > 15) {
3478 Error(ExprLoc, "barrier operand out of range");
3479 return MatchOperand_ParseFail;
3480 }
3481 auto DB = AArch64DB::lookupDBByEncoding(Value);
3482 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3483 ExprLoc, getContext(),
3484 false /*hasnXSModifier*/));
3485 return MatchOperand_Success;
3486 }
3487
3488 if (Tok.isNot(AsmToken::Identifier)) {
3489 TokError("invalid operand for instruction");
3490 return MatchOperand_ParseFail;
3491 }
3492
3493 StringRef Operand = Tok.getString();
3494 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3495 auto DB = AArch64DB::lookupDBByName(Operand);
3496 // The only valid named option for ISB is 'sy'
3497 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3498 TokError("'sy' or #imm operand expected");
3499 return MatchOperand_ParseFail;
3500 // The only valid named option for TSB is 'csync'
3501 } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3502 TokError("'csync' operand expected");
3503 return MatchOperand_ParseFail;
3504 } else if (!DB && !TSB) {
3505 if (Mnemonic == "dsb") {
3506 // This case is a no match here, but it might be matched by the nXS
3507 // variant.
3508 return MatchOperand_NoMatch;
3509 }
3510 TokError("invalid barrier option name");
3511 return MatchOperand_ParseFail;
3512 }
3513
3514 Operands.push_back(AArch64Operand::CreateBarrier(
3515 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3516 getContext(), false /*hasnXSModifier*/));
3517 Lex(); // Consume the option
3518
3519 return MatchOperand_Success;
3520}
3521
3522OperandMatchResultTy
3523AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3524 const AsmToken &Tok = getTok();
3525
3526 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3527 if (Mnemonic != "dsb")
3528 return MatchOperand_ParseFail;
3529
3530 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3531 // Immediate operand.
3532 const MCExpr *ImmVal;
3533 SMLoc ExprLoc = getLoc();
3534 if (getParser().parseExpression(ImmVal))
3535 return MatchOperand_ParseFail;
3536 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3537 if (!MCE) {
3538 Error(ExprLoc, "immediate value expected for barrier operand");
3539 return MatchOperand_ParseFail;
3540 }
3541 int64_t Value = MCE->getValue();
3542 // v8.7-A DSB in the nXS variant accepts only the following immediate
3543 // values: 16, 20, 24, 28.
3544 if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3545 Error(ExprLoc, "barrier operand out of range");
3546 return MatchOperand_ParseFail;
3547 }
3548 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3549 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3550 ExprLoc, getContext(),
3551 true /*hasnXSModifier*/));
3552 return MatchOperand_Success;
3553 }
3554
3555 if (Tok.isNot(AsmToken::Identifier)) {
3556 TokError("invalid operand for instruction");
3557 return MatchOperand_ParseFail;
3558 }
3559
3560 StringRef Operand = Tok.getString();
3561 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3562
3563 if (!DB) {
3564 TokError("invalid barrier option name");
3565 return MatchOperand_ParseFail;
3566 }
3567
3568 Operands.push_back(
3569 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3570 getContext(), true /*hasnXSModifier*/));
3571 Lex(); // Consume the option
3572
3573 return MatchOperand_Success;
3574}
3575
3576OperandMatchResultTy
3577AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3578 const AsmToken &Tok = getTok();
3579
3580 if (Tok.isNot(AsmToken::Identifier))
3581 return MatchOperand_NoMatch;
3582
3583 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3584 return MatchOperand_NoMatch;
3585
3586 int MRSReg, MSRReg;
3587 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3588 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3589 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3590 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3591 } else
3592 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3593
3594 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3595 unsigned PStateImm = -1;
3596 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3597 PStateImm = PState->Encoding;
3598
3599 Operands.push_back(
3600 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3601 PStateImm, getContext()));
3602 Lex(); // Eat identifier
3603
3604 return MatchOperand_Success;
3605}
3606
3607/// tryParseNeonVectorRegister - Parse a vector register operand.
3608bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3609 if (getTok().isNot(AsmToken::Identifier))
3610 return true;
3611
3612 SMLoc S = getLoc();
3613 // Check for a vector register specifier first.
3614 StringRef Kind;
3615 unsigned Reg;
3616 OperandMatchResultTy Res =
3617 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3618 if (Res != MatchOperand_Success)
3619 return true;
3620
3621 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3622 if (!KindRes)
3623 return true;
3624
3625 unsigned ElementWidth = KindRes->second;
3626 Operands.push_back(
3627 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3628 S, getLoc(), getContext()));
3629
3630 // If there was an explicit qualifier, that goes on as a literal text
3631 // operand.
3632 if (!Kind.empty())
3633 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
3634
3635 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3636}
3637
3638OperandMatchResultTy
3639AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3640 SMLoc SIdx = getLoc();
3641 if (parseOptionalToken(AsmToken::LBrac)) {
3642 const MCExpr *ImmVal;
3643 if (getParser().parseExpression(ImmVal))
3644 return MatchOperand_NoMatch;
3645 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3646 if (!MCE) {
3647 TokError("immediate value expected for vector index");
3648 return MatchOperand_ParseFail;
3649 }
3650
3651 SMLoc E = getLoc();
3652
3653 if (parseToken(AsmToken::RBrac, "']' expected"))
3654 return MatchOperand_ParseFail;
3655
3656 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3657 E, getContext()));
3658 return MatchOperand_Success;
3659 }
3660
3661 return MatchOperand_NoMatch;
3662}
3663
3664// tryParseVectorRegister - Try to parse a vector register name with
3665// optional kind specifier. If it is a register specifier, eat the token
3666// and return it.
3667OperandMatchResultTy
3668AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3669 RegKind MatchKind) {
3670 const AsmToken &Tok = getTok();
3671
3672 if (Tok.isNot(AsmToken::Identifier))
3673 return MatchOperand_NoMatch;
3674
3675 StringRef Name = Tok.getString();
3676 // If there is a kind specifier, it's separated from the register name by
3677 // a '.'.
3678 size_t Start = 0, Next = Name.find('.');
3679 StringRef Head = Name.slice(Start, Next);
3680 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3681
3682 if (RegNum) {
3683 if (Next != StringRef::npos) {
3684 Kind = Name.slice(Next, StringRef::npos);
3685 if (!isValidVectorKind(Kind, MatchKind)) {
3686 TokError("invalid vector kind qualifier");
3687 return MatchOperand_ParseFail;
3688 }
3689 }
3690 Lex(); // Eat the register token.
3691
3692 Reg = RegNum;
3693 return MatchOperand_Success;
3694 }
3695
3696 return MatchOperand_NoMatch;
3697}
3698
3699/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3700OperandMatchResultTy
3701AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3702 // Check for a SVE predicate register specifier first.
3703 const SMLoc S = getLoc();
3704 StringRef Kind;
3705 unsigned RegNum;
3706 auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3707 if (Res != MatchOperand_Success)
3708 return Res;
3709
3710 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3711 if (!KindRes)
3712 return MatchOperand_NoMatch;
3713
3714 unsigned ElementWidth = KindRes->second;
3715 Operands.push_back(AArch64Operand::CreateVectorReg(
3716 RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3717 getLoc(), getContext()));
3718
3719 if (getLexer().is(AsmToken::LBrac)) {
3720 // Indexed predicate: there's no comma, so try to parse the next operand
3721 // immediately.
3722 if (parseOperand(Operands, false, false))
3723 return MatchOperand_NoMatch;
3724 }
3725
3726 // Not all predicates are followed by a '/m' or '/z'.
3727 if (getTok().isNot(AsmToken::Slash))
3728 return MatchOperand_Success;
3729
3730 // But when they do they shouldn't have an element type suffix.
3731 if (!Kind.empty()) {
3732 Error(S, "not expecting size suffix");
3733 return MatchOperand_ParseFail;
3734 }
3735
3736 // Add a literal slash as operand
3737 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
3738
3739 Lex(); // Eat the slash.
3740
3741 // Zeroing or merging?
3742 auto Pred = getTok().getString().lower();
3743 if (Pred != "z" && Pred != "m") {
3744 Error(getLoc(), "expecting 'm' or 'z' predication");
3745 return MatchOperand_ParseFail;
3746 }
3747
3748 // Add zero/merge token.
3749 const char *ZM = Pred == "z" ? "z" : "m";
3750 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
3751
3752 Lex(); // Eat zero/merge token.
3753 return MatchOperand_Success;
3754}
3755
3756/// parseRegister - Parse a register operand.
3757bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3758 // Try for a Neon vector register.
3759 if (!tryParseNeonVectorRegister(Operands))
3760 return false;
3761
3762 // Otherwise try for a scalar register.
3763 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3764 return false;
3765
3766 return true;
3767}
3768
3769bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3770 bool HasELFModifier = false;
3771 AArch64MCExpr::VariantKind RefKind;
3772
3773 if (parseOptionalToken(AsmToken::Colon)) {
3774 HasELFModifier = true;
3775
3776 if (getTok().isNot(AsmToken::Identifier))
3777 return TokError("expect relocation specifier in operand after ':'");
3778
3779 std::string LowerCase = getTok().getIdentifier().lower();
3780 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3781 .Case("lo12", AArch64MCExpr::VK_LO12)
3782 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3783 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3784 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3785 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3786 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3787 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3788 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3789 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3790 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3791 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3792 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3793 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3794 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3795 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3796 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3797 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3798 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3799 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3800 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3801 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3802 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3803 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3804 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3805 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3806 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3807 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3808 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3809 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3810 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3811 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3812 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3813 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3814 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3815 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3816 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3817 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3818 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3819 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3820 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3821 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3822 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3823 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3824 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3825 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3826 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3827 .Default(AArch64MCExpr::VK_INVALID);
3828
3829 if (RefKind == AArch64MCExpr::VK_INVALID)
3830 return TokError("expect relocation specifier in operand after ':'");
3831
3832 Lex(); // Eat identifier
3833
3834 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3835 return true;
3836 }
3837
3838 if (getParser().parseExpression(ImmVal))
3839 return true;
3840
3841 if (HasELFModifier)
3842 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3843
3844 return false;
3845}
3846
3847OperandMatchResultTy
3848AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
3849 if (getTok().isNot(AsmToken::LCurly))
3850 return MatchOperand_NoMatch;
3851
3852 auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
3853 StringRef Name = getTok().getString();
3854 size_t DotPosition = Name.find('.');
3855 if (DotPosition == StringRef::npos)
3856 return MatchOperand_NoMatch;
3857
3858 unsigned RegNum = matchMatrixTileListRegName(Name);
3859 if (!RegNum)
3860 return MatchOperand_NoMatch;
3861
3862 StringRef Tail = Name.drop_front(DotPosition);
3863 const Optional<std::pair<int, int>> &KindRes =
3864 parseVectorKind(Tail, RegKind::Matrix);
3865 if (!KindRes) {
3866 TokError("Expected the register to be followed by element width suffix");
3867 return MatchOperand_ParseFail;
3868 }
3869 ElementWidth = KindRes->second;
3870 Reg = RegNum;
3871 Lex(); // Eat the register.
3872 return MatchOperand_Success;
3873 };
3874
3875 SMLoc S = getLoc();
3876 auto LCurly = getTok();
3877 Lex(); // Eat left bracket token.
3878
3879 // Empty matrix list
3880 if (parseOptionalToken(AsmToken::RCurly)) {
3881 Operands.push_back(AArch64Operand::CreateMatrixTileList(
3882 /*RegMask=*/0, S, getLoc(), getContext()));
3883 return MatchOperand_Success;
3884 }
3885
3886 // Try parse {za} alias early
3887 if (getTok().getString().equals_insensitive("za")) {
3888 Lex(); // Eat 'za'
3889
3890 if (parseToken(AsmToken::RCurly, "'}' expected"))
3891 return MatchOperand_ParseFail;
3892
3893 Operands.push_back(AArch64Operand::CreateMatrixTileList(
3894 /*RegMask=*/0xFF, S, getLoc(), getContext()));
3895 return MatchOperand_Success;
3896 }
3897
3898 SMLoc TileLoc = getLoc();
3899
3900 unsigned FirstReg, ElementWidth;
3901 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
3902 if (ParseRes != MatchOperand_Success) {
3903 getLexer().UnLex(LCurly);
3904 return ParseRes;
3905 }
3906
3907 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3908
3909 unsigned PrevReg = FirstReg;
3910 unsigned Count = 1;
3911
3912 SmallSet<unsigned, 8> DRegs;
3913 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
3914
3915 SmallSet<unsigned, 8> SeenRegs;
3916 SeenRegs.insert(FirstReg);
3917
3918 while (parseOptionalToken(AsmToken::Comma)) {
3919 TileLoc = getLoc();
3920 unsigned Reg, NextElementWidth;
3921 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
3922 if (ParseRes != MatchOperand_Success)
3923 return ParseRes;
3924
3925 // Element size must match on all regs in the list.
3926 if (ElementWidth != NextElementWidth) {
3927 Error(TileLoc, "mismatched register size suffix");
3928 return MatchOperand_ParseFail;
3929 }
3930
3931 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
3932 Warning(TileLoc, "tile list not in ascending order");
3933
3934 if (SeenRegs.contains(Reg))
3935 Warning(TileLoc, "duplicate tile in list");
3936 else {
3937 SeenRegs.insert(Reg);
3938 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
3939 }
3940
3941 PrevReg = Reg;
3942 ++Count;
3943 }
3944
3945 if (parseToken(AsmToken::RCurly, "'}' expected"))
3946 return MatchOperand_ParseFail;
3947
3948 unsigned RegMask = 0;
3949 for (auto Reg : DRegs)
3950 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
3951 RI->getEncodingValue(AArch64::ZAD0));
3952 Operands.push_back(
3953 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
3954
3955 return MatchOperand_Success;
3956}
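
The RegMask built at the end above has one bit per 64-bit tile, indexed by the tile's encoding offset from ZAD0; the "{za}" alias earlier in the function corresponds to all eight bits. A reduced model of the mask construction (plain indices stand in for register encodings):

    #include <cassert>
    #include <set>

    // Reduced model: each of the eight ZAD0..ZAD7 tiles occupies one bit of
    // an 8-bit mask, keyed by its encoding offset from ZAD0 (here, the index).
    static unsigned buildTileMask(const std::set<unsigned> &DRegIndices) {
      unsigned RegMask = 0;
      for (unsigned Idx : DRegIndices)
        RegMask |= 0x1u << Idx;
      return RegMask;
    }

    int main() {
      // {za0.d, za3.d, za7.d} -> bits 0, 3 and 7 set.
      assert(buildTileMask({0, 3, 7}) == 0x89u);
      // The "{za}" alias covers all eight tiles: mask 0xFF.
      assert(buildTileMask({0, 1, 2, 3, 4, 5, 6, 7}) == 0xFFu);
    }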
3957
3958template <RegKind VectorKind>
3959OperandMatchResultTy
3960AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3961 bool ExpectMatch) {
3962 MCAsmParser &Parser = getParser();
3963 if (!getTok().is(AsmToken::LCurly))
3964 return MatchOperand_NoMatch;
3965
3966 // Wrapper around parse function
3967 auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3968 bool NoMatchIsError) {
3969 auto RegTok = getTok();
3970 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3971 if (ParseRes == MatchOperand_Success) {
3972 if (parseVectorKind(Kind, VectorKind))
3973 return ParseRes;
3974 llvm_unreachable("Expected a valid vector kind");
3975 }
3976
3977 if (RegTok.isNot(AsmToken::Identifier) ||
3978 ParseRes == MatchOperand_ParseFail ||
3979 (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
3980 !RegTok.getString().startswith_insensitive("za"))) {
3981 Error(Loc, "vector register expected");
3982 return MatchOperand_ParseFail;
3983 }
3984
3985 return MatchOperand_NoMatch;
3986 };
3987
3988 SMLoc S = getLoc();
3989 auto LCurly = getTok();
3990 Lex(); // Eat left bracket token.
3991
3992 StringRef Kind;
3993 unsigned FirstReg;
3994 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3995
3996 // Put back the original left bracket if there was no match, so that
3997 // different types of list-operands can be matched (e.g. SVE, Neon).
3998 if (ParseRes == MatchOperand_NoMatch)
3999 Parser.getLexer().UnLex(LCurly);
4000
4001 if (ParseRes != MatchOperand_Success)
4002 return ParseRes;
4003
4004 int64_t PrevReg = FirstReg;
4005 unsigned Count = 1;
4006
4007 if (parseOptionalToken(AsmToken::Minus)) {
4008 SMLoc Loc = getLoc();
4009 StringRef NextKind;
4010
4011 unsigned Reg;
4012 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4013 if (ParseRes != MatchOperand_Success)
4014 return ParseRes;
4015
4016 // Any kind suffixes must match on all regs in the list.
4017 if (Kind != NextKind) {
4018 Error(Loc, "mismatched register size suffix");
4019 return MatchOperand_ParseFail;
4020 }
4021
4022 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
4023
4024 if (Space == 0 || Space > 3) {
4025 Error(Loc, "invalid number of vectors");
4026 return MatchOperand_ParseFail;
4027 }
4028
4029 Count += Space;
4030 }
4031 else {
4032 while (parseOptionalToken(AsmToken::Comma)) {
4033 SMLoc Loc = getLoc();
4034 StringRef NextKind;
4035 unsigned Reg;
4036 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4037 if (ParseRes != MatchOperand_Success)
4038 return ParseRes;
4039
4040 // Any kind suffixes must match on all regs in the list.
4041 if (Kind != NextKind) {
4042 Error(Loc, "mismatched register size suffix");
4043 return MatchOperand_ParseFail;
4044 }
4045
4046 // Registers must be incremental (with wraparound at 31)
4047 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
4048 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
4049 Error(Loc, "registers must be sequential");
4050 return MatchOperand_ParseFail;
4051 }
4052
4053 PrevReg = Reg;
4054 ++Count;
4055 }
4056 }
4057
4058 if (parseToken(AsmToken::RCurly, "'}' expected"))
4059 return MatchOperand_ParseFail;
4060
4061 if (Count > 4) {
4062 Error(S, "invalid number of vectors");
4063 return MatchOperand_ParseFail;
4064 }
4065
4066 unsigned NumElements = 0;
4067 unsigned ElementWidth = 0;
4068 if (!Kind.empty()) {
4069 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4070 std::tie(NumElements, ElementWidth) = *VK;
4071 }
4072
4073 Operands.push_back(AArch64Operand::CreateVectorList(
4074 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
4075 getContext()));
4076
4077 return MatchOperand_Success;
4078}
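// Editor's note (hedged): the range form above wraps at register 31, so a
// list such as { v30.16b - v1.16b } is a valid 4-register list. The distance
// computation in isolation, assuming 32 contiguously encoded registers:
//
//   unsigned vectorListSpace(unsigned PrevEnc, unsigned RegEnc) {
//     return (PrevEnc < RegEnc) ? (RegEnc - PrevEnc) : (RegEnc + 32 - PrevEnc);
//   }
//   // vectorListSpace(30, 1) == 3, so Count becomes 1 + 3 == 4.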
4079
4080/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4081bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4082 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4083 if (ParseRes != MatchOperand_Success)
4084 return true;
4085
4086 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4087}
4088
4089OperandMatchResultTy
4090AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4091 SMLoc StartLoc = getLoc();
4092
4093 unsigned RegNum;
4094 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4095 if (Res != MatchOperand_Success)
4096 return Res;
4097
4098 if (!parseOptionalToken(AsmToken::Comma)) {
4099 Operands.push_back(AArch64Operand::CreateReg(
4100 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4101 return MatchOperand_Success;
4102 }
4103
4104 parseOptionalToken(AsmToken::Hash);
4105
4106 if (getTok().isNot(AsmToken::Integer)) {
4107 Error(getLoc(), "index must be absent or #0");
4108 return MatchOperand_ParseFail;
4109 }
4110
4111 const MCExpr *ImmVal;
4112 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4113 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4114 Error(getLoc(), "index must be absent or #0");
4115 return MatchOperand_ParseFail;
4116 }
4117
4118 Operands.push_back(AArch64Operand::CreateReg(
4119 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4120 return MatchOperand_Success;
4121}
4122
4123template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4124OperandMatchResultTy
4125AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4126 SMLoc StartLoc = getLoc();
4127
4128 unsigned RegNum;
4129 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4130 if (Res != MatchOperand_Success)
4131 return Res;
4132
4133 // No shift/extend is the default.
4134 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4135 Operands.push_back(AArch64Operand::CreateReg(
4136 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4137 return MatchOperand_Success;
4138 }
4139
4140 // Eat the comma
4141 Lex();
4142
4143 // Match the shift
4144 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4145 Res = tryParseOptionalShiftExtend(ExtOpnd);
4146 if (Res != MatchOperand_Success)
4147 return Res;
4148
4149 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4150 Operands.push_back(AArch64Operand::CreateReg(
4151 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4152 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4153 Ext->hasShiftExtendAmount()));
4154
4155 return MatchOperand_Success;
4156}
4157
4158bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4159 MCAsmParser &Parser = getParser();
4160
4161 // Some SVE instructions have a decoration after the immediate, i.e.
4162 // "mul vl". We parse them here and add tokens, which must be present in the
4163 // asm string in the tablegen instruction.
4164 bool NextIsVL =
4165 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4166 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4167 if (!getTok().getString().equals_insensitive("mul") ||
4168 !(NextIsVL || NextIsHash))
4169 return true;
4170
4171 Operands.push_back(
4172 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4173 Lex(); // Eat the "mul"
4174
4175 if (NextIsVL) {
4176 Operands.push_back(
4177 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4178 Lex(); // Eat the "vl"
4179 return false;
4180 }
4181
4182 if (NextIsHash) {
4183 Lex(); // Eat the #
4184 SMLoc S = getLoc();
4185
4186 // Parse immediate operand.
4187 const MCExpr *ImmVal;
4188 if (!Parser.parseExpression(ImmVal))
4189 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4190 Operands.push_back(AArch64Operand::CreateImm(
4191 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4192 getContext()));
4193 return false; // success (this function returns bool, not OperandMatchResultTy)
4194 }
4195 }
4196
4197 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4198}
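// Editor's note (illustrative assembly, not taken from this file): the
// decoration parsed above appears in SVE forms such as
//   ldr  z0, [x0, #1, mul vl]   // memory offset scaled by the vector length
//   cntd x1, all, mul #4        // element count multiplied by an immediate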
4199
4200bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4201 auto Tok = getTok();
4202 if (Tok.isNot(AsmToken::Identifier))
4203 return true;
4204
4205 auto Keyword = Tok.getString();
4206 Keyword = StringSwitch<StringRef>(Keyword.lower())
4207 .Case("sm", "sm")
4208 .Case("za", "za")
4209 .Default(Keyword);
4210 Operands.push_back(
4211 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4212
4213 Lex();
4214 return false;
4215}
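// Editor's note (hedged): this canonicalizes the SME keywords so that, e.g.,
//   smstart        // enable streaming mode and the ZA array
//   smstart sm     // enable streaming mode only
//   smstart za     // enable the ZA array only
// all match the lower-case "sm"/"za" tokens in the tablegen asm strings.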
4216
4217 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4218/// operand regardless of the mnemonic.
4219bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4220 bool invertCondCode) {
4221 MCAsmParser &Parser = getParser();
4222
4223 OperandMatchResultTy ResTy =
4224 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
4225
4226 // Check if the current operand has a custom associated parser, if so, try to
4227 // custom parse the operand, or fallback to the general approach.
4228 if (ResTy == MatchOperand_Success)
4229 return false;
4230 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4231 // there was a match, but an error occurred, in which case, just return that
4232 // the operand parsing failed.
4233 if (ResTy == MatchOperand_ParseFail)
4234 return true;
4235
4236 // Nothing custom, so do general case parsing.
4237 SMLoc S, E;
4238 switch (getLexer().getKind()) {
4239 default: {
4240 SMLoc S = getLoc();
4241 const MCExpr *Expr;
4242 if (parseSymbolicImmVal(Expr))
4243 return Error(S, "invalid operand");
4244
4245 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4246 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4247 return false;
4248 }
4249 case AsmToken::LBrac: {
4250 Operands.push_back(
4251 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4252 Lex(); // Eat '['
4253
4254 // There's no comma after a '[', so we can parse the next operand
4255 // immediately.
4256 return parseOperand(Operands, false, false);
4257 }
4258 case AsmToken::LCurly: {
4259 if (!parseNeonVectorList(Operands))
4260 return false;
4261
4262 Operands.push_back(
4263 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4264 Lex(); // Eat '{'
4265
4266 // There's no comma after a '{', so we can parse the next operand
4267 // immediately.
4268 return parseOperand(Operands, false, false);
4269 }
4270 case AsmToken::Identifier: {
4271 // If we're expecting a Condition Code operand, then just parse that.
4272 if (isCondCode)
4273 return parseCondCode(Operands, invertCondCode);
4274
4275 // If it's a register name, parse it.
4276 if (!parseRegister(Operands))
4277 return false;
4278
4279 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4280 // by SVE instructions.
4281 if (!parseOptionalMulOperand(Operands))
4282 return false;
4283
4284 // If this is an "smstart" or "smstop" instruction, parse its special
4285 // keyword operand as an identifier.
4286 if (Mnemonic == "smstart" || Mnemonic == "smstop")
4287 return parseKeywordOperand(Operands);
4288
4289 // This could be an optional "shift" or "extend" operand.
4290 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
4291 // We can only continue if no tokens were eaten.
4292 if (GotShift != MatchOperand_NoMatch)
4293 return GotShift;
4294
4295 // If this is a two-word mnemonic, parse its special keyword
4296 // operand as an identifier.
4297 if (Mnemonic == "brb")
4298 return parseKeywordOperand(Operands);
4299
4300 // This was not a register so parse other operands that start with an
4301 // identifier (like labels) as expressions and create them as immediates.
4302 const MCExpr *IdVal;
4303 S = getLoc();
4304 if (getParser().parseExpression(IdVal))
4305 return true;
4306 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4307 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4308 return false;
4309 }
4310 case AsmToken::Integer:
4311 case AsmToken::Real:
4312 case AsmToken::Hash: {
4313 // #42 -> immediate.
4314 S = getLoc();
4315
4316 parseOptionalToken(AsmToken::Hash);
4317
4318 // Parse a negative sign
4319 bool isNegative = false;
4320 if (getTok().is(AsmToken::Minus)) {
4321 isNegative = true;
4322 // We need to consume this token only when we have a Real, otherwise
4323 // we let parseSymbolicImmVal take care of it
4324 if (Parser.getLexer().peekTok().is(AsmToken::Real))
4325 Lex();
4326 }
4327
4328 // The only Real that should come through here is a literal #0.0 for
4329 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4330 // so convert the value.
4331 const AsmToken &Tok = getTok();
4332 if (Tok.is(AsmToken::Real)) {
4333 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4334 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4335 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4336 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4337 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4338 return TokError("unexpected floating point literal");
4339 else if (IntVal != 0 || isNegative)
4340 return TokError("expected floating-point constant #0.0");
4341 Lex(); // Eat the token.
4342
4343 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4344 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4345 return false;
4346 }
4347
4348 const MCExpr *ImmVal;
4349 if (parseSymbolicImmVal(ImmVal))
4350 return true;
4351
4352 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4353 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4354 return false;
4355 }
4356 case AsmToken::Equal: {
4357 SMLoc Loc = getLoc();
4358 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4359 return TokError("unexpected token in operand");
4360 Lex(); // Eat '='
4361 const MCExpr *SubExprVal;
4362 if (getParser().parseExpression(SubExprVal))
4363 return true;
4364
4365 if (Operands.size() < 2 ||
4366 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4367 return Error(Loc, "Only valid when first operand is register");
4368
4369 bool IsXReg =
4370 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4371 Operands[1]->getReg());
4372
4373 MCContext& Ctx = getContext();
4374 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4375 // If the op is an imm and can fit into a mov, then replace ldr with mov.
4376 if (isa<MCConstantExpr>(SubExprVal)) {
4377 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4378 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4379 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
4380 ShiftAmt += 16;
4381 Imm >>= 16;
4382 }
4383 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4384 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4385 Operands.push_back(AArch64Operand::CreateImm(
4386 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4387 if (ShiftAmt)
4388 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4389 ShiftAmt, true, S, E, Ctx));
4390 return false;
4391 }
4392 APInt Simm = APInt(64, Imm << ShiftAmt);
4393 // check if the immediate is an unsigned or signed 32-bit int for W regs
4394 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4395 return Error(Loc, "Immediate too large for register");
4396 }
4397 // If it is a label or an imm that cannot fit in a movz, put it into CP.
4398 const MCExpr *CPLoc =
4399 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4400 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4401 return false;
4402 }
4403 }
4404}
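// Editor's note (hedged sketch): the "ldr Rd, =imm" normalization above
// strips trailing 16-bit zero chunks so the value fits a movz with an LSL
// shift. The loop in isolation:
//
//   bool fitsMovz(uint64_t Imm, uint32_t MaxShiftAmt, uint32_t &ShiftAmt) {
//     ShiftAmt = 0;
//     while (Imm > 0xFFFF && (Imm & 0xFFFF) == 0) { // countTrailingZeros >= 16
//       ShiftAmt += 16;
//       Imm >>= 16;
//     }
//     return ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF;
//   }
//   // e.g. "ldr x0, =0x12340000" becomes "movz x0, #0x1234, lsl #16".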
4405
4406bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4407 const MCExpr *Expr = nullptr;
4408 SMLoc L = getLoc();
4409 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4410 return true;
4411 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4412 if (check(!Value, L, "expected constant expression"))
4413 return true;
4414 Out = Value->getValue();
4415 return false;
4416}
4417
4418bool AArch64AsmParser::parseComma() {
4419 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4420 return true;
4421 // Eat the comma
4422 Lex();
4423 return false;
4424}
4425
4426bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4427 unsigned First, unsigned Last) {
4428 unsigned Reg;
27
'Reg' declared without an initial value
4429 SMLoc Start, End;
4430 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
28
Calling 'AArch64AsmParser::ParseRegister'
37
Returning from 'AArch64AsmParser::ParseRegister'
38
Assuming the condition is false
39
Taking false branch
4431 return true;
4432
4433 // Special handling for FP and LR; they aren't linearly after x28 in
4434 // the registers enum.
4435 unsigned RangeEnd = Last;
4436 if (Base == AArch64::X0) {
39.1
'Base' is not equal to X0
40
Taking false branch
4437 if (Last == AArch64::FP) {
4438 RangeEnd = AArch64::X28;
4439 if (Reg == AArch64::FP) {
4440 Out = 29;
4441 return false;
4442 }
4443 }
4444 if (Last == AArch64::LR) {
4445 RangeEnd = AArch64::X28;
4446 if (Reg == AArch64::FP) {
4447 Out = 29;
4448 return false;
4449 } else if (Reg == AArch64::LR) {
4450 Out = 30;
4451 return false;
4452 }
4453 }
4454 }
4455
4456 if (check(Reg < First || Reg > RangeEnd, Start,
41
The left operand of '<' is a garbage value
4457 Twine("expected register in range ") +
4458 AArch64InstPrinter::getRegisterName(First) + " to " +
4459 AArch64InstPrinter::getRegisterName(Last)))
4460 return true;
4461 Out = Reg - Base;
4462 return false;
4463}
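// Editor's note (hedged): the report's path ends at line 4456 because 'Reg'
// is declared at line 4428 without an initializer and the analyzer assumes a
// ParseRegister path that returns success without writing it. A defensive
// rewrite that removes the uninitialized read (illustrative only; the
// upstream fix may differ):
//
//   unsigned Reg = 0; // was: unsigned Reg;
//   SMLoc Start, End;
//   if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
//     return true;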
4464
4465bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4466 const MCParsedAsmOperand &Op2) const {
4467 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4468 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4469 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4470 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4471 return MCTargetAsmParser::regsEqual(Op1, Op2);
4472
4473 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4474 "Testing equality of non-scalar registers not supported");
4476 // Check if the registers match their sub/super register classes.
4477 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4478 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4479 if (AOp1.getRegEqualityTy() == EqualsSubReg)
4480 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4481 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4482 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4483 if (AOp2.getRegEqualityTy() == EqualsSubReg)
4484 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4485
4486 return false;
4487}
4488
4489/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
4490/// operands.
4491bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
4492 StringRef Name, SMLoc NameLoc,
4493 OperandVector &Operands) {
4494 Name = StringSwitch<StringRef>(Name.lower())
4495 .Case("beq", "b.eq")
4496 .Case("bne", "b.ne")
4497 .Case("bhs", "b.hs")
4498 .Case("bcs", "b.cs")
4499 .Case("blo", "b.lo")
4500 .Case("bcc", "b.cc")
4501 .Case("bmi", "b.mi")
4502 .Case("bpl", "b.pl")
4503 .Case("bvs", "b.vs")
4504 .Case("bvc", "b.vc")
4505 .Case("bhi", "b.hi")
4506 .Case("bls", "b.ls")
4507 .Case("bge", "b.ge")
4508 .Case("blt", "b.lt")
4509 .Case("bgt", "b.gt")
4510 .Case("ble", "b.le")
4511 .Case("bal", "b.al")
4512 .Case("bnv", "b.nv")
4513 .Default(Name);
4514
4515 // First check for the AArch64-specific .req directive.
4516 if (getTok().is(AsmToken::Identifier) &&
4517 getTok().getIdentifier().lower() == ".req") {
4518 parseDirectiveReq(Name, NameLoc);
4519 // We always return 'error' for this, as we're done with this
4520 // statement and don't need to match the 'instruction'.
4521 return true;
4522 }
4523
4524 // Create the leading tokens for the mnemonic, split by '.' characters.
4525 size_t Start = 0, Next = Name.find('.');
4526 StringRef Head = Name.slice(Start, Next);
4527
4528 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4529 // the SYS instruction.
4530 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4531 Head == "cfp" || Head == "dvp" || Head == "cpp")
4532 return parseSysAlias(Head, NameLoc, Operands);
4533
4534 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
4535 Mnemonic = Head;
4536
4537 // Handle condition codes for a branch mnemonic
4538 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
4539 Start = Next;
4540 Next = Name.find('.', Start + 1);
4541 Head = Name.slice(Start + 1, Next);
4542
4543 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4544 (Head.data() - Name.data()));
4545 AArch64CC::CondCode CC = parseCondCodeString(Head);
4546 if (CC == AArch64CC::Invalid)
4547 return Error(SuffixLoc, "invalid condition code");
4548 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
4549 /*IsSuffix=*/true));
4550 Operands.push_back(
4551 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4552 }
4553
4554 // Add the remaining tokens in the mnemonic.
4555 while (Next != StringRef::npos) {
4556 Start = Next;
4557 Next = Name.find('.', Start + 1);
4558 Head = Name.slice(Start, Next);
4559 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4560 (Head.data() - Name.data()) + 1);
4561 Operands.push_back(AArch64Operand::CreateToken(
4562 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
4563 }
4564
4565 // Conditional compare instructions have a Condition Code operand, which needs
4566 // to be parsed and an immediate operand created.
4567 bool condCodeFourthOperand =
4568 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4569 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4570 Head == "csinc" || Head == "csinv" || Head == "csneg");
4571
4572 // These instructions are aliases to some of the conditional select
4573 // instructions. However, the condition code is inverted in the aliased
4574 // instruction.
4575 //
4576 // FIXME: Is this the correct way to handle these? Or should the parser
4577 // generate the aliased instructions directly?
4578 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4579 bool condCodeThirdOperand =
4580 (Head == "cinc" || Head == "cinv" || Head == "cneg");
4581
4582 // Read the remaining operands.
4583 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4584
4585 unsigned N = 1;
4586 do {
4587 // Parse and remember the operand.
4588 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4589 (N == 3 && condCodeThirdOperand) ||
4590 (N == 2 && condCodeSecondOperand),
4591 condCodeSecondOperand || condCodeThirdOperand)) {
4592 return true;
4593 }
4594
4595 // After successfully parsing some operands there are three special cases
4596 // to consider (i.e. notional operands not separated by commas). Two are
4597 // due to memory specifiers:
4598 // + An RBrac will end an address for load/store/prefetch
4599 // + An '!' will indicate a pre-indexed operation.
4600 //
4601 // And a further case is '}', which ends a group of tokens specifying the
4602 // SME accumulator array 'ZA' or tile vector, i.e.
4603 //
4604 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
4605 //
4606 // It's someone else's responsibility to make sure these tokens are sane
4607 // in the given context!
4608
4609 if (parseOptionalToken(AsmToken::RBrac))
4610 Operands.push_back(
4611 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4612 if (parseOptionalToken(AsmToken::Exclaim))
4613 Operands.push_back(
4614 AArch64Operand::CreateToken("!", getLoc(), getContext()));
4615 if (parseOptionalToken(AsmToken::RCurly))
4616 Operands.push_back(
4617 AArch64Operand::CreateToken("}", getLoc(), getContext()));
4618
4619 ++N;
4620 } while (parseOptionalToken(AsmToken::Comma));
4621 }
4622
4623 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4624 return true;
4625
4626 return false;
4627}
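// Editor's note (worked example, hedged): for "b.eq label" the code above
// emits a "b" token, a "." suffix token and an EQ condition-code operand;
// for Darwin-style Neon mnemonics such as "fadd.4s" the trailing ".4s" is
// kept as a plain suffix token by the final loop over '.' separators.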
4628
4629static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4630 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4631 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4632 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4633 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4634 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4635 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4636 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4637}
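// Editor's note (hedged reading): each of the B/H/S/D/Q/Z register ranges is
// contiguous in the generated enum, so (Reg - <Base>0) + Z0 maps a register
// onto the Z register it overlaps when Reg lies in that range, e.g.
//
//   constexpr unsigned toZ(unsigned Reg, unsigned Base0, unsigned Z0) {
//     return (Reg - Base0) + Z0; // D3 -> Z3 when Base0 == D0
//   }
//
// For a Reg outside a given range the corresponding disjunct is simply false.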
4638
4639// FIXME: This entire function is a giant hack to provide us with decent
4640// operand range validation/diagnostics until TableGen/MC can be extended
4641// to support autogeneration of this kind of validation.
4642bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4643 SmallVectorImpl<SMLoc> &Loc) {
4644 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4645 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4646
4647 // A prefix only applies to the instruction following it. Here we extract
4648 // prefix information for the next instruction before validating the current
4649 // one so that in the case of failure we don't erroneously continue using the
4650 // current prefix.
4651 PrefixInfo Prefix = NextPrefix;
4652 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4653
4654 // Before validating the instruction in isolation we run through the rules
4655 // applicable when it follows a prefix instruction.
4656 // NOTE: brk & hlt can be prefixed but require no additional validation.
4657 if (Prefix.isActive() &&
4658 (Inst.getOpcode() != AArch64::BRK) &&
4659 (Inst.getOpcode() != AArch64::HLT)) {
4660
4661 // Prefixed instructions must have a destructive operand.
4662 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4663 AArch64::NotDestructive)
4664 return Error(IDLoc, "instruction is unpredictable when following a"
4665 " movprfx, suggest replacing movprfx with mov");
4666
4667 // Destination operands must match.
4668 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4669 return Error(Loc[0], "instruction is unpredictable when following a"
4670 " movprfx writing to a different destination");
4671
4672 // Destination operand must not be used in any other location.
4673 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4674 if (Inst.getOperand(i).isReg() &&
4675 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4676 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4677 return Error(Loc[0], "instruction is unpredictable when following a"
4678 " movprfx and destination also used as non-destructive"
4679 " source");
4680 }
4681
4682 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4683 if (Prefix.isPredicated()) {
4684 int PgIdx = -1;
4685
4686 // Find the instruction's general predicate.
4687 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4688 if (Inst.getOperand(i).isReg() &&
4689 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4690 PgIdx = i;
4691 break;
4692 }
4693
4694 // Instruction must be predicated if the movprfx is predicated.
4695 if (PgIdx == -1 ||
4696 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4697 return Error(IDLoc, "instruction is unpredictable when following a"
4698 " predicated movprfx, suggest using unpredicated movprfx");
4699
4700 // Instruction must use same general predicate as the movprfx.
4701 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4702 return Error(IDLoc, "instruction is unpredictable when following a"
4703 " predicated movprfx using a different general predicate");
4704
4705 // Instruction element type must match the movprfx.
4706 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4707 return Error(IDLoc, "instruction is unpredictable when following a"
4708 " predicated movprfx with a different element size");
4709 }
4710 }
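// Editor's note (illustrative assembly, hedged): the rules above reject
// sequences such as
//   movprfx z0.s, p0/m, z1.s
//   add     z0.s, p1/m, z0.s, z2.s   // error: different general predicate
// while accepting the same pair with p0 in both instructions.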
4711
4712 // Check for indexed addressing modes with the base register being the
4713 // same as a destination/source register, or for pair loads where
4714 // Rt == Rt2. All of those are undefined behaviour.
4715 switch (Inst.getOpcode()) {
4716 case AArch64::LDPSWpre:
4717 case AArch64::LDPWpost:
4718 case AArch64::LDPWpre:
4719 case AArch64::LDPXpost:
4720 case AArch64::LDPXpre: {
4721 unsigned Rt = Inst.getOperand(1).getReg();
4722 unsigned Rt2 = Inst.getOperand(2).getReg();
4723 unsigned Rn = Inst.getOperand(3).getReg();
4724 if (RI->isSubRegisterEq(Rn, Rt))
4725 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4726 "is also a destination");
4727 if (RI->isSubRegisterEq(Rn, Rt2))
4728 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4729 "is also a destination");
4730 LLVM_FALLTHROUGH;
4731 }
4732 case AArch64::LDPDi:
4733 case AArch64::LDPQi:
4734 case AArch64::LDPSi:
4735 case AArch64::LDPSWi:
4736 case AArch64::LDPWi:
4737 case AArch64::LDPXi: {
4738 unsigned Rt = Inst.getOperand(0).getReg();
4739 unsigned Rt2 = Inst.getOperand(1).getReg();
4740 if (Rt == Rt2)
4741 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4742 break;
4743 }
4744 case AArch64::LDPDpost:
4745 case AArch64::LDPDpre:
4746 case AArch64::LDPQpost:
4747 case AArch64::LDPQpre:
4748 case AArch64::LDPSpost:
4749 case AArch64::LDPSpre:
4750 case AArch64::LDPSWpost: {
4751 unsigned Rt = Inst.getOperand(1).getReg();
4752 unsigned Rt2 = Inst.getOperand(2).getReg();
4753 if (Rt == Rt2)
4754 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4755 break;
4756 }
4757 case AArch64::STPDpost:
4758 case AArch64::STPDpre:
4759 case AArch64::STPQpost:
4760 case AArch64::STPQpre:
4761 case AArch64::STPSpost:
4762 case AArch64::STPSpre:
4763 case AArch64::STPWpost:
4764 case AArch64::STPWpre:
4765 case AArch64::STPXpost:
4766 case AArch64::STPXpre: {
4767 unsigned Rt = Inst.getOperand(1).getReg();
4768 unsigned Rt2 = Inst.getOperand(2).getReg();
4769 unsigned Rn = Inst.getOperand(3).getReg();
4770 if (RI->isSubRegisterEq(Rn, Rt))
4771 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4772 "is also a source");
4773 if (RI->isSubRegisterEq(Rn, Rt2))
4774 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4775 "is also a source");
4776 break;
4777 }
4778 case AArch64::LDRBBpre:
4779 case AArch64::LDRBpre:
4780 case AArch64::LDRHHpre:
4781 case AArch64::LDRHpre:
4782 case AArch64::LDRSBWpre:
4783 case AArch64::LDRSBXpre:
4784 case AArch64::LDRSHWpre:
4785 case AArch64::LDRSHXpre:
4786 case AArch64::LDRSWpre:
4787 case AArch64::LDRWpre:
4788 case AArch64::LDRXpre:
4789 case AArch64::LDRBBpost:
4790 case AArch64::LDRBpost:
4791 case AArch64::LDRHHpost:
4792 case AArch64::LDRHpost:
4793 case AArch64::LDRSBWpost:
4794 case AArch64::LDRSBXpost:
4795 case AArch64::LDRSHWpost:
4796 case AArch64::LDRSHXpost:
4797 case AArch64::LDRSWpost:
4798 case AArch64::LDRWpost:
4799 case AArch64::LDRXpost: {
4800 unsigned Rt = Inst.getOperand(1).getReg();
4801 unsigned Rn = Inst.getOperand(2).getReg();
4802 if (RI->isSubRegisterEq(Rn, Rt))
4803 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4804 "is also a source");
4805 break;
4806 }
4807 case AArch64::STRBBpost:
4808 case AArch64::STRBpost:
4809 case AArch64::STRHHpost:
4810 case AArch64::STRHpost:
4811 case AArch64::STRWpost:
4812 case AArch64::STRXpost:
4813 case AArch64::STRBBpre:
4814 case AArch64::STRBpre:
4815 case AArch64::STRHHpre:
4816 case AArch64::STRHpre:
4817 case AArch64::STRWpre:
4818 case AArch64::STRXpre: {
4819 unsigned Rt = Inst.getOperand(1).getReg();
4820 unsigned Rn = Inst.getOperand(2).getReg();
4821 if (RI->isSubRegisterEq(Rn, Rt))
4822 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4823 "is also a source");
4824 break;
4825 }
4826 case AArch64::STXRB:
4827 case AArch64::STXRH:
4828 case AArch64::STXRW:
4829 case AArch64::STXRX:
4830 case AArch64::STLXRB:
4831 case AArch64::STLXRH:
4832 case AArch64::STLXRW:
4833 case AArch64::STLXRX: {
4834 unsigned Rs = Inst.getOperand(0).getReg();
4835 unsigned Rt = Inst.getOperand(1).getReg();
4836 unsigned Rn = Inst.getOperand(2).getReg();
4837 if (RI->isSubRegisterEq(Rt, Rs) ||
4838 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4839 return Error(Loc[0],
4840 "unpredictable STXR instruction, status is also a source");
4841 break;
4842 }
4843 case AArch64::STXPW:
4844 case AArch64::STXPX:
4845 case AArch64::STLXPW:
4846 case AArch64::STLXPX: {
4847 unsigned Rs = Inst.getOperand(0).getReg();
4848 unsigned Rt1 = Inst.getOperand(1).getReg();
4849 unsigned Rt2 = Inst.getOperand(2).getReg();
4850 unsigned Rn = Inst.getOperand(3).getReg();
4851 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4852 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4853 return Error(Loc[0],
4854 "unpredictable STXP instruction, status is also a source");
4855 break;
4856 }
4857 case AArch64::LDRABwriteback:
4858 case AArch64::LDRAAwriteback: {
4859 unsigned Xt = Inst.getOperand(0).getReg();
4860 unsigned Xn = Inst.getOperand(1).getReg();
4861 if (Xt == Xn)
4862 return Error(Loc[0],
4863 "unpredictable LDRA instruction, writeback base"
4864 " is also a destination");
4865 break;
4866 }
4867 }
4868
4869 // Check v8.8-A memops instructions.
4870 switch (Inst.getOpcode()) {
4871 case AArch64::CPYFP:
4872 case AArch64::CPYFPWN:
4873 case AArch64::CPYFPRN:
4874 case AArch64::CPYFPN:
4875 case AArch64::CPYFPWT:
4876 case AArch64::CPYFPWTWN:
4877 case AArch64::CPYFPWTRN:
4878 case AArch64::CPYFPWTN:
4879 case AArch64::CPYFPRT:
4880 case AArch64::CPYFPRTWN:
4881 case AArch64::CPYFPRTRN:
4882 case AArch64::CPYFPRTN:
4883 case AArch64::CPYFPT:
4884 case AArch64::CPYFPTWN:
4885 case AArch64::CPYFPTRN:
4886 case AArch64::CPYFPTN:
4887 case AArch64::CPYFM:
4888 case AArch64::CPYFMWN:
4889 case AArch64::CPYFMRN:
4890 case AArch64::CPYFMN:
4891 case AArch64::CPYFMWT:
4892 case AArch64::CPYFMWTWN:
4893 case AArch64::CPYFMWTRN:
4894 case AArch64::CPYFMWTN:
4895 case AArch64::CPYFMRT:
4896 case AArch64::CPYFMRTWN:
4897 case AArch64::CPYFMRTRN:
4898 case AArch64::CPYFMRTN:
4899 case AArch64::CPYFMT:
4900 case AArch64::CPYFMTWN:
4901 case AArch64::CPYFMTRN:
4902 case AArch64::CPYFMTN:
4903 case AArch64::CPYFE:
4904 case AArch64::CPYFEWN:
4905 case AArch64::CPYFERN:
4906 case AArch64::CPYFEN:
4907 case AArch64::CPYFEWT:
4908 case AArch64::CPYFEWTWN:
4909 case AArch64::CPYFEWTRN:
4910 case AArch64::CPYFEWTN:
4911 case AArch64::CPYFERT:
4912 case AArch64::CPYFERTWN:
4913 case AArch64::CPYFERTRN:
4914 case AArch64::CPYFERTN:
4915 case AArch64::CPYFET:
4916 case AArch64::CPYFETWN:
4917 case AArch64::CPYFETRN:
4918 case AArch64::CPYFETN:
4919 case AArch64::CPYP:
4920 case AArch64::CPYPWN:
4921 case AArch64::CPYPRN:
4922 case AArch64::CPYPN:
4923 case AArch64::CPYPWT:
4924 case AArch64::CPYPWTWN:
4925 case AArch64::CPYPWTRN:
4926 case AArch64::CPYPWTN:
4927 case AArch64::CPYPRT:
4928 case AArch64::CPYPRTWN:
4929 case AArch64::CPYPRTRN:
4930 case AArch64::CPYPRTN:
4931 case AArch64::CPYPT:
4932 case AArch64::CPYPTWN:
4933 case AArch64::CPYPTRN:
4934 case AArch64::CPYPTN:
4935 case AArch64::CPYM:
4936 case AArch64::CPYMWN:
4937 case AArch64::CPYMRN:
4938 case AArch64::CPYMN:
4939 case AArch64::CPYMWT:
4940 case AArch64::CPYMWTWN:
4941 case AArch64::CPYMWTRN:
4942 case AArch64::CPYMWTN:
4943 case AArch64::CPYMRT:
4944 case AArch64::CPYMRTWN:
4945 case AArch64::CPYMRTRN:
4946 case AArch64::CPYMRTN:
4947 case AArch64::CPYMT:
4948 case AArch64::CPYMTWN:
4949 case AArch64::CPYMTRN:
4950 case AArch64::CPYMTN:
4951 case AArch64::CPYE:
4952 case AArch64::CPYEWN:
4953 case AArch64::CPYERN:
4954 case AArch64::CPYEN:
4955 case AArch64::CPYEWT:
4956 case AArch64::CPYEWTWN:
4957 case AArch64::CPYEWTRN:
4958 case AArch64::CPYEWTN:
4959 case AArch64::CPYERT:
4960 case AArch64::CPYERTWN:
4961 case AArch64::CPYERTRN:
4962 case AArch64::CPYERTN:
4963 case AArch64::CPYET:
4964 case AArch64::CPYETWN:
4965 case AArch64::CPYETRN:
4966 case AArch64::CPYETN: {
4967 unsigned Xd_wb = Inst.getOperand(0).getReg();
4968 unsigned Xs_wb = Inst.getOperand(1).getReg();
4969 unsigned Xn_wb = Inst.getOperand(2).getReg();
4970 unsigned Xd = Inst.getOperand(3).getReg();
4971 unsigned Xs = Inst.getOperand(4).getReg();
4972 unsigned Xn = Inst.getOperand(5).getReg();
4973 if (Xd_wb != Xd)
4974 return Error(Loc[0],
4975 "invalid CPY instruction, Xd_wb and Xd do not match");
4976 if (Xs_wb != Xs)
4977 return Error(Loc[0],
4978 "invalid CPY instruction, Xs_wb and Xs do not match");
4979 if (Xn_wb != Xn)
4980 return Error(Loc[0],
4981 "invalid CPY instruction, Xn_wb and Xn do not match");
4982 if (Xd == Xs)
4983 return Error(Loc[0], "invalid CPY instruction, destination and source"
4984 " registers are the same");
4985 if (Xd == Xn)
4986 return Error(Loc[0], "invalid CPY instruction, destination and size"
4987 " registers are the same");
4988 if (Xs == Xn)
4989 return Error(Loc[0], "invalid CPY instruction, source and size"
4990 " registers are the same");
4991 break;
4992 }
4993 case AArch64::SETP:
4994 case AArch64::SETPT:
4995 case AArch64::SETPN:
4996 case AArch64::SETPTN:
4997 case AArch64::SETM:
4998 case AArch64::SETMT:
4999 case AArch64::SETMN:
5000 case AArch64::SETMTN:
5001 case AArch64::SETE:
5002 case AArch64::SETET:
5003 case AArch64::SETEN:
5004 case AArch64::SETETN:
5005 case AArch64::SETGP:
5006 case AArch64::SETGPT:
5007 case AArch64::SETGPN:
5008 case AArch64::SETGPTN:
5009 case AArch64::SETGM:
5010 case AArch64::SETGMT:
5011 case AArch64::SETGMN:
5012 case AArch64::SETGMTN:
5013 case AArch64::MOPSSETGE:
5014 case AArch64::MOPSSETGET:
5015 case AArch64::MOPSSETGEN:
5016 case AArch64::MOPSSETGETN: {
5017 unsigned Xd_wb = Inst.getOperand(0).getReg();
5018 unsigned Xn_wb = Inst.getOperand(1).getReg();
5019 unsigned Xd = Inst.getOperand(2).getReg();
5020 unsigned Xn = Inst.getOperand(3).getReg();
5021 unsigned Xm = Inst.getOperand(4).getReg();
5022 if (Xd_wb != Xd)
5023 return Error(Loc[0],
5024 "invalid SET instruction, Xd_wb and Xd do not match");
5025 if (Xn_wb != Xn)
5026 return Error(Loc[0],
5027 "invalid SET instruction, Xn_wb and Xn do not match");
5028 if (Xd == Xn)
5029 return Error(Loc[0], "invalid SET instruction, destination and size"
5030 " registers are the same");
5031 if (Xd == Xm)
5032 return Error(Loc[0], "invalid SET instruction, destination and source"
5033 " registers are the same");
5034 if (Xn == Xm)
5035 return Error(Loc[0], "invalid SET instruction, source and size"
5036 " registers are the same");
5037 break;
5038 }
5039 }
5040
5041 // Now check immediate ranges. Separate from the above as there is overlap
5042 // in the instructions being checked and this keeps the nested conditionals
5043 // to a minimum.
5044 switch (Inst.getOpcode()) {
5045 case AArch64::ADDSWri:
5046 case AArch64::ADDSXri:
5047 case AArch64::ADDWri:
5048 case AArch64::ADDXri:
5049 case AArch64::SUBSWri:
5050 case AArch64::SUBSXri:
5051 case AArch64::SUBWri:
5052 case AArch64::SUBXri: {
5053 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5054 // some slight duplication here.
5055 if (Inst.getOperand(2).isExpr()) {
5056 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5057 AArch64MCExpr::VariantKind ELFRefKind;
5058 MCSymbolRefExpr::VariantKind DarwinRefKind;
5059 int64_t Addend;
5060 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5061
5062 // Only allow these with ADDXri.
5063 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5064 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5065 Inst.getOpcode() == AArch64::ADDXri)
5066 return false;
5067
5068 // Only allow these with ADDXri/ADDWri
5069 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5070 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5071 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5072 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5073 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5074 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5075 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5076 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5077 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5078 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5079 (Inst.getOpcode() == AArch64::ADDXri ||
5080 Inst.getOpcode() == AArch64::ADDWri))
5081 return false;
5082
5083 // Otherwise, don't allow symbol refs in the immediate field.
5084 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5085 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5086 // 'cmp w0, 'borked')
5087 return Error(Loc.back(), "invalid immediate expression");
5088 }
5089 // We don't validate more complex expressions here
5090 }
5091 return false;
5092 }
5093 default:
5094 return false;
5095 }
5096}
5097
5098static std::string AArch64MnemonicSpellCheck(StringRef S,
5099 const FeatureBitset &FBS,
5100 unsigned VariantID = 0);
5101
5102bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5103 uint64_t ErrorInfo,
5104 OperandVector &Operands) {
5105 switch (ErrCode) {
5106 case Match_InvalidTiedOperand: {
5107 RegConstraintEqualityTy EqTy =
5108 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
5109 .getRegEqualityTy();
5110 switch (EqTy) {
5111 case RegConstraintEqualityTy::EqualsSubReg:
5112 return Error(Loc, "operand must be 64-bit form of destination register");
5113 case RegConstraintEqualityTy::EqualsSuperReg:
5114 return Error(Loc, "operand must be 32-bit form of destination register");
5115 case RegConstraintEqualityTy::EqualsReg:
5116 return Error(Loc, "operand must match destination register");
5117 }
5118 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5118
)
;
5119 }
5120 case Match_MissingFeature:
5121 return Error(Loc,
5122 "instruction requires a CPU feature not currently enabled");
5123 case Match_InvalidOperand:
5124 return Error(Loc, "invalid operand for instruction");
5125 case Match_InvalidSuffix:
5126 return Error(Loc, "invalid type suffix for instruction");
5127 case Match_InvalidCondCode:
5128 return Error(Loc, "expected AArch64 condition code");
5129 case Match_AddSubRegExtendSmall:
5130 return Error(Loc,
5131 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5132 case Match_AddSubRegExtendLarge:
5133 return Error(Loc,
5134 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5135 case Match_AddSubSecondSource:
5136 return Error(Loc,
5137 "expected compatible register, symbol or integer in range [0, 4095]");
5138 case Match_LogicalSecondSource:
5139 return Error(Loc, "expected compatible register or logical immediate");
5140 case Match_InvalidMovImm32Shift:
5141 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5142 case Match_InvalidMovImm64Shift:
5143 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5144 case Match_AddSubRegShift32:
5145 return Error(Loc,
5146 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5147 case Match_AddSubRegShift64:
5148 return Error(Loc,
5149 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5150 case Match_InvalidFPImm:
5151 return Error(Loc,
5152 "expected compatible register or floating-point constant");
5153 case Match_InvalidMemoryIndexedSImm6:
5154 return Error(Loc, "index must be an integer in range [-32, 31].");
5155 case Match_InvalidMemoryIndexedSImm5:
5156 return Error(Loc, "index must be an integer in range [-16, 15].");
5157 case Match_InvalidMemoryIndexed1SImm4:
5158 return Error(Loc, "index must be an integer in range [-8, 7].");
5159 case Match_InvalidMemoryIndexed2SImm4:
5160 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5161 case Match_InvalidMemoryIndexed3SImm4:
5162 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5163 case Match_InvalidMemoryIndexed4SImm4:
5164 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5165 case Match_InvalidMemoryIndexed16SImm4:
5166 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5167 case Match_InvalidMemoryIndexed32SImm4:
5168 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5169 case Match_InvalidMemoryIndexed1SImm6:
5170 return Error(Loc, "index must be an integer in range [-32, 31].");
5171 case Match_InvalidMemoryIndexedSImm8:
5172 return Error(Loc, "index must be an integer in range [-128, 127].");
5173 case Match_InvalidMemoryIndexedSImm9:
5174 return Error(Loc, "index must be an integer in range [-256, 255].");
5175 case Match_InvalidMemoryIndexed16SImm9:
5176 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5177 case Match_InvalidMemoryIndexed8SImm10:
5178 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5179 case Match_InvalidMemoryIndexed4SImm7:
5180 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5181 case Match_InvalidMemoryIndexed8SImm7:
5182 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5183 case Match_InvalidMemoryIndexed16SImm7:
5184 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5185 case Match_InvalidMemoryIndexed8UImm5:
5186 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5187 case Match_InvalidMemoryIndexed4UImm5:
5188 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5189 case Match_InvalidMemoryIndexed2UImm5:
5190 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5191 case Match_InvalidMemoryIndexed8UImm6:
5192 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5193 case Match_InvalidMemoryIndexed16UImm6:
5194 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5195 case Match_InvalidMemoryIndexed4UImm6:
5196 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5197 case Match_InvalidMemoryIndexed2UImm6:
5198 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5199 case Match_InvalidMemoryIndexed1UImm6:
5200 return Error(Loc, "index must be in range [0, 63].");
5201 case Match_InvalidMemoryWExtend8:
5202 return Error(Loc,
5203 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5204 case Match_InvalidMemoryWExtend16:
5205 return Error(Loc,
5206 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5207 case Match_InvalidMemoryWExtend32:
5208 return Error(Loc,
5209 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5210 case Match_InvalidMemoryWExtend64:
5211 return Error(Loc,
5212 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5213 case Match_InvalidMemoryWExtend128:
5214 return Error(Loc,
5215 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5216 case Match_InvalidMemoryXExtend8:
5217 return Error(Loc,
5218 "expected 'lsl' or 'sxtx' with optional shift of #0");
5219 case Match_InvalidMemoryXExtend16:
5220 return Error(Loc,
5221 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5222 case Match_InvalidMemoryXExtend32:
5223 return Error(Loc,
5224 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5225 case Match_InvalidMemoryXExtend64:
5226 return Error(Loc,
5227 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5228 case Match_InvalidMemoryXExtend128:
5229 return Error(Loc,
5230 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5231 case Match_InvalidMemoryIndexed1:
5232 return Error(Loc, "index must be an integer in range [0, 4095].");
5233 case Match_InvalidMemoryIndexed2:
5234 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5235 case Match_InvalidMemoryIndexed4:
5236 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5237 case Match_InvalidMemoryIndexed8:
5238 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5239 case Match_InvalidMemoryIndexed16:
5240 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5241 case Match_InvalidImm0_0:
5242 return Error(Loc, "immediate must be 0.");
5243 case Match_InvalidImm0_1:
5244 return Error(Loc, "immediate must be an integer in range [0, 1].");
5245 case Match_InvalidImm0_3:
5246 return Error(Loc, "immediate must be an integer in range [0, 3].");
5247 case Match_InvalidImm0_7:
5248 return Error(Loc, "immediate must be an integer in range [0, 7].");
5249 case Match_InvalidImm0_15:
5250 return Error(Loc, "immediate must be an integer in range [0, 15].");
5251 case Match_InvalidImm0_31:
5252 return Error(Loc, "immediate must be an integer in range [0, 31].");
5253 case Match_InvalidImm0_63:
5254 return Error(Loc, "immediate must be an integer in range [0, 63].");
5255 case Match_InvalidImm0_127:
5256 return Error(Loc, "immediate must be an integer in range [0, 127].");
5257 case Match_InvalidImm0_255:
5258 return Error(Loc, "immediate must be an integer in range [0, 255].");
5259 case Match_InvalidImm0_65535:
5260 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5261 case Match_InvalidImm1_8:
5262 return Error(Loc, "immediate must be an integer in range [1, 8].");
5263 case Match_InvalidImm1_16:
5264 return Error(Loc, "immediate must be an integer in range [1, 16].");
5265 case Match_InvalidImm1_32:
5266 return Error(Loc, "immediate must be an integer in range [1, 32].");
5267 case Match_InvalidImm1_64:
5268 return Error(Loc, "immediate must be an integer in range [1, 64].");
5269 case Match_InvalidSVEAddSubImm8:
5270 return Error(Loc, "immediate must be an integer in range [0, 255]"
5271 " with a shift amount of 0");
5272 case Match_InvalidSVEAddSubImm16:
5273 case Match_InvalidSVEAddSubImm32:
5274 case Match_InvalidSVEAddSubImm64:
5275 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5276 "multiple of 256 in range [256, 65280]");
5277 case Match_InvalidSVECpyImm8:
5278 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5279 " with a shift amount of 0");
5280 case Match_InvalidSVECpyImm16:
5281 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5282 "multiple of 256 in range [-32768, 65280]");
5283 case Match_InvalidSVECpyImm32:
5284 case Match_InvalidSVECpyImm64:
5285 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5286 "multiple of 256 in range [-32768, 32512]");
5287 case Match_InvalidIndexRange0_0:
5288 return Error(Loc, "expected lane specifier '[0]'");
5289 case Match_InvalidIndexRange1_1:
5290 return Error(Loc, "expected lane specifier '[1]'");
5291 case Match_InvalidIndexRange0_15:
5292 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5293 case Match_InvalidIndexRange0_7:
5294 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5295 case Match_InvalidIndexRange0_3:
5296 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5297 case Match_InvalidIndexRange0_1:
5298 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5299 case Match_InvalidSVEIndexRange0_63:
5300 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5301 case Match_InvalidSVEIndexRange0_31:
5302 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5303 case Match_InvalidSVEIndexRange0_15:
5304 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5305 case Match_InvalidSVEIndexRange0_7:
5306 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5307 case Match_InvalidSVEIndexRange0_3:
5308 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5309 case Match_InvalidLabel:
5310 return Error(Loc, "expected label or encodable integer pc offset");
5311 case Match_MRS:
5312 return Error(Loc, "expected readable system register");
5313 case Match_MSR:
5314 case Match_InvalidSVCR:
5315 return Error(Loc, "expected writable system register or pstate");
5316 case Match_InvalidComplexRotationEven:
5317 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5318 case Match_InvalidComplexRotationOdd:
5319 return Error(Loc, "complex rotation must be 90 or 270.");
5320 case Match_MnemonicFail: {
5321 std::string Suggestion = AArch64MnemonicSpellCheck(
5322 ((AArch64Operand &)*Operands[0]).getToken(),
5323 ComputeAvailableFeatures(STI->getFeatureBits()));
5324 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5325 }
5326 case Match_InvalidGPR64shifted8:
5327 return Error(Loc, "register must be x0..x30 or xzr, without shift");
5328 case Match_InvalidGPR64shifted16:
5329 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5330 case Match_InvalidGPR64shifted32:
5331 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5332 case Match_InvalidGPR64shifted64:
5333 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5334 case Match_InvalidGPR64shifted128:
5335 return Error(
5336 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5337 case Match_InvalidGPR64NoXZRshifted8:
5338 return Error(Loc, "register must be x0..x30 without shift");
5339 case Match_InvalidGPR64NoXZRshifted16:
5340 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5341 case Match_InvalidGPR64NoXZRshifted32:
5342 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5343 case Match_InvalidGPR64NoXZRshifted64:
5344 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5345 case Match_InvalidGPR64NoXZRshifted128:
5346 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
5347 case Match_InvalidZPR32UXTW8:
5348 case Match_InvalidZPR32SXTW8:
5349 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5350 case Match_InvalidZPR32UXTW16:
5351 case Match_InvalidZPR32SXTW16:
5352 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5353 case Match_InvalidZPR32UXTW32:
5354 case Match_InvalidZPR32SXTW32:
5355 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5356 case Match_InvalidZPR32UXTW64:
5357 case Match_InvalidZPR32SXTW64:
5358 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5359 case Match_InvalidZPR64UXTW8:
5360 case Match_InvalidZPR64SXTW8:
5361 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5362 case Match_InvalidZPR64UXTW16:
5363 case Match_InvalidZPR64SXTW16:
5364 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5365 case Match_InvalidZPR64UXTW32:
5366 case Match_InvalidZPR64SXTW32:
5367 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5368 case Match_InvalidZPR64UXTW64:
5369 case Match_InvalidZPR64SXTW64:
5370 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5371 case Match_InvalidZPR32LSL8:
5372 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5373 case Match_InvalidZPR32LSL16:
5374 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5375 case Match_InvalidZPR32LSL32:
5376 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5377 case Match_InvalidZPR32LSL64:
5378 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5379 case Match_InvalidZPR64LSL8:
5380 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5381 case Match_InvalidZPR64LSL16:
5382 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5383 case Match_InvalidZPR64LSL32:
5384 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5385 case Match_InvalidZPR64LSL64:
5386 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5387 case Match_InvalidZPR0:
5388 return Error(Loc, "expected register without element width suffix");
5389 case Match_InvalidZPR8:
5390 case Match_InvalidZPR16:
5391 case Match_InvalidZPR32:
5392 case Match_InvalidZPR64:
5393 case Match_InvalidZPR128:
5394 return Error(Loc, "invalid element width");
5395 case Match_InvalidZPR_3b8:
5396 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
5397 case Match_InvalidZPR_3b16:
5398 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
5399 case Match_InvalidZPR_3b32:
5400 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
5401 case Match_InvalidZPR_4b16:
5402 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
5403 case Match_InvalidZPR_4b32:
5404 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
5405 case Match_InvalidZPR_4b64:
5406 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
5407 case Match_InvalidSVEPattern:
5408 return Error(Loc, "invalid predicate pattern");
5409 case Match_InvalidSVEPredicateAnyReg:
5410 case Match_InvalidSVEPredicateBReg:
5411 case Match_InvalidSVEPredicateHReg:
5412 case Match_InvalidSVEPredicateSReg:
5413 case Match_InvalidSVEPredicateDReg:
5414 return Error(Loc, "invalid predicate register.");
5415 case Match_InvalidSVEPredicate3bAnyReg:
5416 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
5417 case Match_InvalidSVEExactFPImmOperandHalfOne:
5418 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
5419 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5420 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
5421 case Match_InvalidSVEExactFPImmOperandZeroOne:
5422 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
5423 case Match_InvalidMatrixTileVectorH8:
5424 case Match_InvalidMatrixTileVectorV8:
5425 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
5426 case Match_InvalidMatrixTileVectorH16:
5427 case Match_InvalidMatrixTileVectorV16:
5428 return Error(Loc,
5429 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
5430 case Match_InvalidMatrixTileVectorH32:
5431 case Match_InvalidMatrixTileVectorV32:
5432 return Error(Loc,
5433 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
5434 case Match_InvalidMatrixTileVectorH64:
5435 case Match_InvalidMatrixTileVectorV64:
5436 return Error(Loc,
5437 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
5438 case Match_InvalidMatrixTileVectorH128:
5439 case Match_InvalidMatrixTileVectorV128:
5440 return Error(Loc,
5441 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
5442 case Match_InvalidMatrixTile32:
5443 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
5444 case Match_InvalidMatrixTile64:
5445 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
5446 case Match_InvalidMatrix:
5447 return Error(Loc, "invalid matrix operand, expected za");
5448 case Match_InvalidMatrixIndexGPR32_12_15:
5449 return Error(Loc, "operand must be a register in range [w12, w15]");
5450 default:
5451    llvm_unreachable("unexpected error code!");
5452 }
5453}
5454
5455static const char *getSubtargetFeatureName(uint64_t Val);
5456
5457bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
5458 OperandVector &Operands,
5459 MCStreamer &Out,
5460 uint64_t &ErrorInfo,
5461 bool MatchingInlineAsm) {
5462  assert(!Operands.empty() && "Unexpect empty operand list!");
5463 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
5464  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
5465
5466 StringRef Tok = Op.getToken();
5467 unsigned NumOperands = Operands.size();
5468
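  // Worked example (illustrative, not in the analyzed source): for a
  // 64-bit register, "lsl x1, x2, #4" is rewritten below as
  // "ubfm x1, x2, #60, #59", since (64 - 4) & 0x3f == 60 and
  // 63 - 4 == 59; for a 32-bit register, "lsl w1, w2, #4" becomes
  // "ubfm w1, w2, #28, #27".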
5469 if (NumOperands == 4 && Tok == "lsl") {
5470 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5471 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5472 if (Op2.isScalarReg() && Op3.isImm()) {
5473 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5474 if (Op3CE) {
5475 uint64_t Op3Val = Op3CE->getValue();
5476 uint64_t NewOp3Val = 0;
5477 uint64_t NewOp4Val = 0;
5478 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
5479 Op2.getReg())) {
5480 NewOp3Val = (32 - Op3Val) & 0x1f;
5481 NewOp4Val = 31 - Op3Val;
5482 } else {
5483 NewOp3Val = (64 - Op3Val) & 0x3f;
5484 NewOp4Val = 63 - Op3Val;
5485 }
5486
5487 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
5488 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
5489
5490 Operands[0] =
5491 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
5492 Operands.push_back(AArch64Operand::CreateImm(
5493 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
5494 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
5495 Op3.getEndLoc(), getContext());
5496 }
5497 }
5498 } else if (NumOperands == 4 && Tok == "bfc") {
5499 // FIXME: Horrible hack to handle BFC->BFM alias.
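    // Worked example (illustrative, not in the analyzed source):
    // "bfc x0, #8, #4" becomes "bfm x0, xzr, #56, #3", since
    // ImmR == (64 - 8) & 0x3f == 56 and ImmS == 4 - 1 == 3. Note that
    // LSBOp and WidthOp are copied by value: Operands[2] and Operands[3]
    // are overwritten below while their source locations are still needed.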
5500 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5501 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
5502 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
5503
5504 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
5505 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
5506 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
5507
5508 if (LSBCE && WidthCE) {
5509 uint64_t LSB = LSBCE->getValue();
5510 uint64_t Width = WidthCE->getValue();
5511
5512 uint64_t RegWidth = 0;
5513 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5514 Op1.getReg()))
5515 RegWidth = 64;
5516 else
5517 RegWidth = 32;
5518
5519 if (LSB >= RegWidth)
5520 return Error(LSBOp.getStartLoc(),
5521 "expected integer in range [0, 31]");
5522 if (Width < 1 || Width > RegWidth)
5523 return Error(WidthOp.getStartLoc(),
5524 "expected integer in range [1, 32]");
5525
5526 uint64_t ImmR = 0;
5527 if (RegWidth == 32)
5528 ImmR = (32 - LSB) & 0x1f;
5529 else
5530 ImmR = (64 - LSB) & 0x3f;
5531
5532 uint64_t ImmS = Width - 1;
5533
5534 if (ImmR != 0 && ImmS >= ImmR)
5535 return Error(WidthOp.getStartLoc(),
5536 "requested insert overflows register");
5537
5538 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
5539 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
5540 Operands[0] =
5541 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
5542 Operands[2] = AArch64Operand::CreateReg(
5543 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
5544 SMLoc(), SMLoc(), getContext());
5545 Operands[3] = AArch64Operand::CreateImm(
5546 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
5547 Operands.emplace_back(
5548 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
5549 WidthOp.getEndLoc(), getContext()));
5550 }
5551 }
5552 } else if (NumOperands == 5) {
5553 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
5554 // UBFIZ -> UBFM aliases.
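    // Worked example (illustrative, not in the analyzed source):
    // "bfi x0, x1, #8, #4" becomes "bfm x0, x1, #56, #3", since
    // NewOp3Val == (64 - 8) & 0x3f == 56 and NewOp4Val == 4 - 1 == 3.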
5555 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
5556 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5557 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5558 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5559
5560 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5561 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5562 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5563
5564 if (Op3CE && Op4CE) {
5565 uint64_t Op3Val = Op3CE->getValue();
5566 uint64_t Op4Val = Op4CE->getValue();
5567
5568 uint64_t RegWidth = 0;
5569 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5570 Op1.getReg()))
5571 RegWidth = 64;
5572 else
5573 RegWidth = 32;
5574
5575 if (Op3Val >= RegWidth)
5576 return Error(Op3.getStartLoc(),
5577 "expected integer in range [0, 31]");
5578 if (Op4Val < 1 || Op4Val > RegWidth)
5579 return Error(Op4.getStartLoc(),
5580 "expected integer in range [1, 32]");
5581
5582 uint64_t NewOp3Val = 0;
5583 if (RegWidth == 32)
5584 NewOp3Val = (32 - Op3Val) & 0x1f;
5585 else
5586 NewOp3Val = (64 - Op3Val) & 0x3f;
5587
5588 uint64_t NewOp4Val = Op4Val - 1;
5589
5590 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
5591 return Error(Op4.getStartLoc(),
5592 "requested insert overflows register");
5593
5594 const MCExpr *NewOp3 =
5595 MCConstantExpr::create(NewOp3Val, getContext());
5596 const MCExpr *NewOp4 =
5597 MCConstantExpr::create(NewOp4Val, getContext());
5598 Operands[3] = AArch64Operand::CreateImm(
5599 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
5600 Operands[4] = AArch64Operand::CreateImm(
5601 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5602 if (Tok == "bfi")
5603 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5604 getContext());
5605 else if (Tok == "sbfiz")
5606 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5607 getContext());
5608 else if (Tok == "ubfiz")
5609 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5610 getContext());
5611 else
5612            llvm_unreachable("No valid mnemonic for alias?");
5613 }
5614 }
5615
5616 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
5617 // UBFX -> UBFM aliases.
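    // Worked example (illustrative, not in the analyzed source):
    // "sbfx x0, x1, #8, #4" becomes "sbfm x0, x1, #8, #11": the extract
    // of 4 bits starting at LSB 8 ends at bit NewOp4Val == 8 + 4 - 1 == 11.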
5618 } else if (NumOperands == 5 &&
5619 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
5620 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5621 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5622 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5623
5624 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5625 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5626 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5627
5628 if (Op3CE && Op4CE) {
5629 uint64_t Op3Val = Op3CE->getValue();
5630 uint64_t Op4Val = Op4CE->getValue();
5631
5632 uint64_t RegWidth = 0;
5633 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5634 Op1.getReg()))
5635 RegWidth = 64;
5636 else
5637 RegWidth = 32;
5638
5639 if (Op3Val >= RegWidth)
5640 return Error(Op3.getStartLoc(),
5641 "expected integer in range [0, 31]");
5642 if (Op4Val < 1 || Op4Val > RegWidth)
5643 return Error(Op4.getStartLoc(),
5644 "expected integer in range [1, 32]");
5645
5646 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
5647
5648 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
5649 return Error(Op4.getStartLoc(),
5650 "requested extract overflows register");
5651
5652 const MCExpr *NewOp4 =
5653 MCConstantExpr::create(NewOp4Val, getContext());
5654 Operands[4] = AArch64Operand::CreateImm(
5655 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5656 if (Tok == "bfxil")
5657 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5658 getContext());
5659 else if (Tok == "sbfx")
5660 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5661 getContext());
5662 else if (Tok == "ubfx")
5663 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5664 getContext());
5665 else
5666            llvm_unreachable("No valid mnemonic for alias?");
5667 }
5668 }
5669 }
5670 }
5671
5672 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
5673 // instruction for FP registers correctly in some rare circumstances. Convert
5674 // it to a safe instruction and warn (because silently changing someone's
5675 // assembly is rude).
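  // Illustrative rewrite (not in the analyzed source): "movi.2d v0, #0"
  // is re-emitted as "movi.16b v0, #0", which writes the same all-zero
  // 128-bit value while avoiding the affected zero-cycle-zeroing path.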
5676 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
5677 NumOperands == 4 && Tok == "movi") {
5678 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5679 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5680 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5681 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
5682 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
5683 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
5684 if (Suffix.lower() == ".2d" &&
5685 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
5686 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
5687 " correctly on this CPU, converting to equivalent movi.16b");
5688 // Switch the suffix to .16b.
5689 unsigned Idx = Op1.isToken() ? 1 : 2;
5690 Operands[Idx] =
5691 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
5692 }
5693 }
5694 }
5695
5696 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
5697 // InstAlias can't quite handle this since the reg classes aren't
5698 // subclasses.
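  // Illustrative (not in the analyzed source): for "sxtw x0, w1" the
  // matcher expects a GPR64 source operand, so w1 is rewritten to x1
  // below via getXRegFromWReg() before matching is attempted.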
5699 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
5700 // The source register can be Wn here, but the matcher expects a
5701 // GPR64. Twiddle it here if necessary.
5702 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5703 if (Op.isScalarReg()) {
5704 unsigned Reg = getXRegFromWReg(Op.getReg());
5705 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5706 Op.getStartLoc(), Op.getEndLoc(),
5707 getContext());
5708 }
5709 }
5710 // FIXME: Likewise for sxt[bh] with a Xd dst operand
5711 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5712 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5713 if (Op.isScalarReg() &&
5714 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5715 Op.getReg())) {
5716 // The source register can be Wn here, but the matcher expects a
5717 // GPR64. Twiddle it here if necessary.
5718 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5719 if (Op.isScalarReg()) {
5720 unsigned Reg = getXRegFromWReg(Op.getReg());
5721 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5722 Op.getStartLoc(),
5723 Op.getEndLoc(), getContext());
5724 }
5725 }
5726 }
5727 // FIXME: Likewise for uxt[bh] with a Xd dst operand
5728 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5729 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5730 if (Op.isScalarReg() &&
5731 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5732 Op.getReg())) {
5733 // The source register can be Wn here, but the matcher expects a
5734 // GPR32. Twiddle it here if necessary.
5735 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5736 if (Op.isScalarReg()) {
5737 unsigned Reg = getWRegFromXReg(Op.getReg());
5738 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5739 Op.getStartLoc(),
5740 Op.getEndLoc(), getContext());
5741 }
5742 }
5743 }
5744
5745 MCInst Inst;
5746 FeatureBitset MissingFeatures;
5747 // First try to match against the secondary set of tables containing the
5748 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
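  // The trailing argument (1) is the matcher variant ID selecting that
  // secondary table; per the comment above, this is only the first
  // attempt, with the standard table tried if it fails.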
5749 unsigned MatchResult =
5750 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5751 MatchingInlineAsm, 1);