Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 3940, column 15
The left operand of '==' is a garbage value.

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/AArch64/AsmParser -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -I 
/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64InstPrinter.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "TargetInfo/AArch64TargetInfo.h"
15#include "AArch64InstrInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringExtras.h"
23#include "llvm/ADT/StringMap.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/MC/MCContext.h"
28#include "llvm/MC/MCExpr.h"
29#include "llvm/MC/MCInst.h"
30#include "llvm/MC/MCLinkerOptimizationHint.h"
31#include "llvm/MC/MCObjectFileInfo.h"
32#include "llvm/MC/MCParser/MCAsmLexer.h"
33#include "llvm/MC/MCParser/MCAsmParser.h"
34#include "llvm/MC/MCParser/MCAsmParserExtension.h"
35#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
36#include "llvm/MC/MCParser/MCTargetAsmParser.h"
37#include "llvm/MC/MCRegisterInfo.h"
38#include "llvm/MC/MCStreamer.h"
39#include "llvm/MC/MCSubtargetInfo.h"
40#include "llvm/MC/MCSymbol.h"
41#include "llvm/MC/MCTargetOptions.h"
42#include "llvm/MC/SubtargetFeature.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/Compiler.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/MathExtras.h"
48#include "llvm/Support/SMLoc.h"
49#include "llvm/Support/TargetParser.h"
50#include "llvm/Support/TargetRegistry.h"
51#include "llvm/Support/raw_ostream.h"
52#include <cassert>
53#include <cctype>
54#include <cstdint>
55#include <cstdio>
56#include <string>
57#include <tuple>
58#include <utility>
59#include <vector>
60
61using namespace llvm;
62
63namespace {
64
/// The syntactic category of a parsed register operand.
enum class RegKind {
  Scalar,            ///< General-purpose or scalar FP register.
  NeonVector,        ///< NEON vector register.
  SVEDataVector,     ///< SVE data vector register.
  SVEPredicateVector ///< SVE predicate register.
};
71
/// How a matched register must relate to the register class the
/// instruction expects (exact match, or via a super-/sub-register).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
77
78class AArch64AsmParser : public MCTargetAsmParser {
79private:
80 StringRef Mnemonic; ///< Instruction mnemonic.
81
82 // Map of register aliases registers via the .req directive.
83 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
84
85 class PrefixInfo {
86 public:
87 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
88 PrefixInfo Prefix;
89 switch (Inst.getOpcode()) {
90 case AArch64::MOVPRFX_ZZ:
91 Prefix.Active = true;
92 Prefix.Dst = Inst.getOperand(0).getReg();
93 break;
94 case AArch64::MOVPRFX_ZPmZ_B:
95 case AArch64::MOVPRFX_ZPmZ_H:
96 case AArch64::MOVPRFX_ZPmZ_S:
97 case AArch64::MOVPRFX_ZPmZ_D:
98 Prefix.Active = true;
99 Prefix.Predicated = true;
100 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
101 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 102, __PRETTY_FUNCTION__))
102 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 102, __PRETTY_FUNCTION__))
;
103 Prefix.Dst = Inst.getOperand(0).getReg();
104 Prefix.Pg = Inst.getOperand(2).getReg();
105 break;
106 case AArch64::MOVPRFX_ZPzZ_B:
107 case AArch64::MOVPRFX_ZPzZ_H:
108 case AArch64::MOVPRFX_ZPzZ_S:
109 case AArch64::MOVPRFX_ZPzZ_D:
110 Prefix.Active = true;
111 Prefix.Predicated = true;
112 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
113 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 114, __PRETTY_FUNCTION__))
114 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 114, __PRETTY_FUNCTION__))
;
115 Prefix.Dst = Inst.getOperand(0).getReg();
116 Prefix.Pg = Inst.getOperand(1).getReg();
117 break;
118 default:
119 break;
120 }
121
122 return Prefix;
123 }
124
125 PrefixInfo() : Active(false), Predicated(false) {}
126 bool isActive() const { return Active; }
127 bool isPredicated() const { return Predicated; }
128 unsigned getElementSize() const {
129 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 129, __PRETTY_FUNCTION__))
;
130 return ElementSize;
131 }
132 unsigned getDstReg() const { return Dst; }
133 unsigned getPgReg() const {
134 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 134, __PRETTY_FUNCTION__))
;
135 return Pg;
136 }
137
138 private:
139 bool Active;
140 bool Predicated;
141 unsigned ElementSize;
142 unsigned Dst;
143 unsigned Pg;
144 } NextPrefix;
145
146 AArch64TargetStreamer &getTargetStreamer() {
147 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
148 return static_cast<AArch64TargetStreamer &>(TS);
149 }
150
151 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
152
153 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
154 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
155 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
156 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
157 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
158 bool parseRegister(OperandVector &Operands);
159 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
160 bool parseNeonVectorList(OperandVector &Operands);
161 bool parseOptionalMulOperand(OperandVector &Operands);
162 bool parseKeywordOperand(OperandVector &Operands);
163 bool parseOperand(OperandVector &Operands, bool isCondCode,
164 bool invertCondCode);
165 bool parseImmExpr(int64_t &Out);
166 bool parseComma();
167 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
168 unsigned Last);
169
170 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
171 OperandVector &Operands);
172
173 bool parseDirectiveArch(SMLoc L);
174 bool parseDirectiveArchExtension(SMLoc L);
175 bool parseDirectiveCPU(SMLoc L);
176 bool parseDirectiveInst(SMLoc L);
177
178 bool parseDirectiveTLSDescCall(SMLoc L);
179
180 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
181 bool parseDirectiveLtorg(SMLoc L);
182
183 bool parseDirectiveReq(StringRef Name, SMLoc L);
184 bool parseDirectiveUnreq(SMLoc L);
185 bool parseDirectiveCFINegateRAState();
186 bool parseDirectiveCFIBKeyFrame();
187
188 bool parseDirectiveVariantPCS(SMLoc L);
189
190 bool parseDirectiveSEHAllocStack(SMLoc L);
191 bool parseDirectiveSEHPrologEnd(SMLoc L);
192 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
193 bool parseDirectiveSEHSaveFPLR(SMLoc L);
194 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
195 bool parseDirectiveSEHSaveReg(SMLoc L);
196 bool parseDirectiveSEHSaveRegX(SMLoc L);
197 bool parseDirectiveSEHSaveRegP(SMLoc L);
198 bool parseDirectiveSEHSaveRegPX(SMLoc L);
199 bool parseDirectiveSEHSaveLRPair(SMLoc L);
200 bool parseDirectiveSEHSaveFReg(SMLoc L);
201 bool parseDirectiveSEHSaveFRegX(SMLoc L);
202 bool parseDirectiveSEHSaveFRegP(SMLoc L);
203 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
204 bool parseDirectiveSEHSetFP(SMLoc L);
205 bool parseDirectiveSEHAddFP(SMLoc L);
206 bool parseDirectiveSEHNop(SMLoc L);
207 bool parseDirectiveSEHSaveNext(SMLoc L);
208 bool parseDirectiveSEHEpilogStart(SMLoc L);
209 bool parseDirectiveSEHEpilogEnd(SMLoc L);
210 bool parseDirectiveSEHTrapFrame(SMLoc L);
211 bool parseDirectiveSEHMachineFrame(SMLoc L);
212 bool parseDirectiveSEHContext(SMLoc L);
213 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
214
215 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
216 SmallVectorImpl<SMLoc> &Loc);
217 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
218 OperandVector &Operands, MCStreamer &Out,
219 uint64_t &ErrorInfo,
220 bool MatchingInlineAsm) override;
221/// @name Auto-generated Match Functions
222/// {
223
224#define GET_ASSEMBLER_HEADER
225#include "AArch64GenAsmMatcher.inc"
226
227 /// }
228
229 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
230 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
231 RegKind MatchKind);
232 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
233 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
234 OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
235 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
236 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
237 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
238 template <bool IsSVEPrefetch = false>
239 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
240 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
241 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
242 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
243 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
244 template<bool AddFPZeroAsLiteral>
245 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
246 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
247 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
248 bool tryParseNeonVectorRegister(OperandVector &Operands);
249 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
250 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
251 template <bool ParseShiftExtend,
252 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
253 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
254 template <bool ParseShiftExtend, bool ParseSuffix>
255 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
256 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
257 template <RegKind VectorKind>
258 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
259 bool ExpectMatch = false);
260 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
261 OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
262
263public:
264 enum AArch64MatchResultTy {
265 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
266#define GET_OPERAND_DIAGNOSTIC_TYPES
267#include "AArch64GenAsmMatcher.inc"
268 };
269 bool IsILP32;
270
271 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
272 const MCInstrInfo &MII, const MCTargetOptions &Options)
273 : MCTargetAsmParser(Options, STI, MII) {
274 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
275 MCAsmParserExtension::Initialize(Parser);
276 MCStreamer &S = getParser().getStreamer();
277 if (S.getTargetStreamer() == nullptr)
278 new AArch64TargetStreamer(S);
279
280 // Alias .hword/.word/.[dx]word to the target-independent
281 // .2byte/.4byte/.8byte directives as they have the same form and
282 // semantics:
283 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
284 Parser.addAliasForDirective(".hword", ".2byte");
285 Parser.addAliasForDirective(".word", ".4byte");
286 Parser.addAliasForDirective(".dword", ".8byte");
287 Parser.addAliasForDirective(".xword", ".8byte");
288
289 // Initialize the set of available features.
290 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
291 }
292
293 bool regsEqual(const MCParsedAsmOperand &Op1,
294 const MCParsedAsmOperand &Op2) const override;
295 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
296 SMLoc NameLoc, OperandVector &Operands) override;
297 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
298 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
299 SMLoc &EndLoc) override;
300 bool ParseDirective(AsmToken DirectiveID) override;
301 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
302 unsigned Kind) override;
303
304 static bool classifySymbolRef(const MCExpr *Expr,
305 AArch64MCExpr::VariantKind &ELFRefKind,
306 MCSymbolRefExpr::VariantKind &DarwinRefKind,
307 int64_t &Addend);
308};
309
310/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
311/// instruction.
312class AArch64Operand : public MCParsedAsmOperand {
313private:
314 enum KindTy {
315 k_Immediate,
316 k_ShiftedImm,
317 k_CondCode,
318 k_Register,
319 k_VectorList,
320 k_VectorIndex,
321 k_Token,
322 k_SysReg,
323 k_SysCR,
324 k_Prefetch,
325 k_ShiftExtend,
326 k_FPImm,
327 k_Barrier,
328 k_PSBHint,
329 k_BTIHint,
330 } Kind;
331
332 SMLoc StartLoc, EndLoc;
333
334 struct TokOp {
335 const char *Data;
336 unsigned Length;
337 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
338 };
339
340 // Separate shift/extend operand.
341 struct ShiftExtendOp {
342 AArch64_AM::ShiftExtendType Type;
343 unsigned Amount;
344 bool HasExplicitAmount;
345 };
346
347 struct RegOp {
348 unsigned RegNum;
349 RegKind Kind;
350 int ElementWidth;
351
352 // The register may be allowed as a different register class,
353 // e.g. for GPR64as32 or GPR32as64.
354 RegConstraintEqualityTy EqualityTy;
355
356 // In some cases the shift/extend needs to be explicitly parsed together
357 // with the register, rather than as a separate operand. This is needed
358 // for addressing modes where the instruction as a whole dictates the
359 // scaling/extend, rather than specific bits in the instruction.
360 // By parsing them as a single operand, we avoid the need to pass an
361 // extra operand in all CodeGen patterns (because all operands need to
362 // have an associated value), and we avoid the need to update TableGen to
363 // accept operands that have no associated bits in the instruction.
364 //
365 // An added benefit of parsing them together is that the assembler
366 // can give a sensible diagnostic if the scaling is not correct.
367 //
368 // The default is 'lsl #0' (HasExplicitAmount = false) if no
369 // ShiftExtend is specified.
370 ShiftExtendOp ShiftExtend;
371 };
372
373 struct VectorListOp {
374 unsigned RegNum;
375 unsigned Count;
376 unsigned NumElements;
377 unsigned ElementWidth;
378 RegKind RegisterKind;
379 };
380
381 struct VectorIndexOp {
382 int Val;
383 };
384
385 struct ImmOp {
386 const MCExpr *Val;
387 };
388
389 struct ShiftedImmOp {
390 const MCExpr *Val;
391 unsigned ShiftAmount;
392 };
393
394 struct CondCodeOp {
395 AArch64CC::CondCode Code;
396 };
397
398 struct FPImmOp {
399 uint64_t Val; // APFloat value bitcasted to uint64_t.
400 bool IsExact; // describes whether parsed value was exact.
401 };
402
403 struct BarrierOp {
404 const char *Data;
405 unsigned Length;
406 unsigned Val; // Not the enum since not all values have names.
407 bool HasnXSModifier;
408 };
409
410 struct SysRegOp {
411 const char *Data;
412 unsigned Length;
413 uint32_t MRSReg;
414 uint32_t MSRReg;
415 uint32_t PStateField;
416 };
417
418 struct SysCRImmOp {
419 unsigned Val;
420 };
421
422 struct PrefetchOp {
423 const char *Data;
424 unsigned Length;
425 unsigned Val;
426 };
427
428 struct PSBHintOp {
429 const char *Data;
430 unsigned Length;
431 unsigned Val;
432 };
433
434 struct BTIHintOp {
435 const char *Data;
436 unsigned Length;
437 unsigned Val;
438 };
439
440 struct ExtendOp {
441 unsigned Val;
442 };
443
444 union {
445 struct TokOp Tok;
446 struct RegOp Reg;
447 struct VectorListOp VectorList;
448 struct VectorIndexOp VectorIndex;
449 struct ImmOp Imm;
450 struct ShiftedImmOp ShiftedImm;
451 struct CondCodeOp CondCode;
452 struct FPImmOp FPImm;
453 struct BarrierOp Barrier;
454 struct SysRegOp SysReg;
455 struct SysCRImmOp SysCRImm;
456 struct PrefetchOp Prefetch;
457 struct PSBHintOp PSBHint;
458 struct BTIHintOp BTIHint;
459 struct ShiftExtendOp ShiftExtend;
460 };
461
462 // Keep the MCContext around as the MCExprs may need manipulated during
463 // the add<>Operands() calls.
464 MCContext &Ctx;
465
466public:
467 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
468
469 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
470 Kind = o.Kind;
471 StartLoc = o.StartLoc;
472 EndLoc = o.EndLoc;
473 switch (Kind) {
474 case k_Token:
475 Tok = o.Tok;
476 break;
477 case k_Immediate:
478 Imm = o.Imm;
479 break;
480 case k_ShiftedImm:
481 ShiftedImm = o.ShiftedImm;
482 break;
483 case k_CondCode:
484 CondCode = o.CondCode;
485 break;
486 case k_FPImm:
487 FPImm = o.FPImm;
488 break;
489 case k_Barrier:
490 Barrier = o.Barrier;
491 break;
492 case k_Register:
493 Reg = o.Reg;
494 break;
495 case k_VectorList:
496 VectorList = o.VectorList;
497 break;
498 case k_VectorIndex:
499 VectorIndex = o.VectorIndex;
500 break;
501 case k_SysReg:
502 SysReg = o.SysReg;
503 break;
504 case k_SysCR:
505 SysCRImm = o.SysCRImm;
506 break;
507 case k_Prefetch:
508 Prefetch = o.Prefetch;
509 break;
510 case k_PSBHint:
511 PSBHint = o.PSBHint;
512 break;
513 case k_BTIHint:
514 BTIHint = o.BTIHint;
515 break;
516 case k_ShiftExtend:
517 ShiftExtend = o.ShiftExtend;
518 break;
519 }
520 }
521
522 /// getStartLoc - Get the location of the first token of this operand.
523 SMLoc getStartLoc() const override { return StartLoc; }
524 /// getEndLoc - Get the location of the last token of this operand.
525 SMLoc getEndLoc() const override { return EndLoc; }
526
527 StringRef getToken() const {
528 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 528, __PRETTY_FUNCTION__))
;
529 return StringRef(Tok.Data, Tok.Length);
530 }
531
532 bool isTokenSuffix() const {
533 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 533, __PRETTY_FUNCTION__))
;
534 return Tok.IsSuffix;
535 }
536
537 const MCExpr *getImm() const {
538 assert(Kind == k_Immediate && "Invalid access!")((Kind == k_Immediate && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 538, __PRETTY_FUNCTION__))
;
539 return Imm.Val;
540 }
541
542 const MCExpr *getShiftedImmVal() const {
543 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 543, __PRETTY_FUNCTION__))
;
544 return ShiftedImm.Val;
545 }
546
547 unsigned getShiftedImmShift() const {
548 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 548, __PRETTY_FUNCTION__))
;
549 return ShiftedImm.ShiftAmount;
550 }
551
552 AArch64CC::CondCode getCondCode() const {
553 assert(Kind == k_CondCode && "Invalid access!")((Kind == k_CondCode && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 553, __PRETTY_FUNCTION__))
;
554 return CondCode.Code;
555 }
556
557 APFloat getFPImm() const {
558 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 558, __PRETTY_FUNCTION__))
;
559 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
560 }
561
562 bool getFPImmIsExact() const {
563 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 563, __PRETTY_FUNCTION__))
;
564 return FPImm.IsExact;
565 }
566
567 unsigned getBarrier() const {
568 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 568, __PRETTY_FUNCTION__))
;
569 return Barrier.Val;
570 }
571
572 StringRef getBarrierName() const {
573 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 573, __PRETTY_FUNCTION__))
;
574 return StringRef(Barrier.Data, Barrier.Length);
575 }
576
577 bool getBarriernXSModifier() const {
578 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 578, __PRETTY_FUNCTION__))
;
579 return Barrier.HasnXSModifier;
580 }
581
582 unsigned getReg() const override {
583 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 583, __PRETTY_FUNCTION__))
;
584 return Reg.RegNum;
585 }
586
587 RegConstraintEqualityTy getRegEqualityTy() const {
588 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 588, __PRETTY_FUNCTION__))
;
589 return Reg.EqualityTy;
590 }
591
592 unsigned getVectorListStart() const {
593 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 593, __PRETTY_FUNCTION__))
;
594 return VectorList.RegNum;
595 }
596
597 unsigned getVectorListCount() const {
598 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 598, __PRETTY_FUNCTION__))
;
599 return VectorList.Count;
600 }
601
602 int getVectorIndex() const {
603 assert(Kind == k_VectorIndex && "Invalid access!")((Kind == k_VectorIndex && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 603, __PRETTY_FUNCTION__))
;
604 return VectorIndex.Val;
605 }
606
607 StringRef getSysReg() const {
608 assert(Kind == k_SysReg && "Invalid access!")((Kind == k_SysReg && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 608, __PRETTY_FUNCTION__))
;
609 return StringRef(SysReg.Data, SysReg.Length);
610 }
611
612 unsigned getSysCR() const {
613 assert(Kind == k_SysCR && "Invalid access!")((Kind == k_SysCR && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 613, __PRETTY_FUNCTION__))
;
614 return SysCRImm.Val;
615 }
616
617 unsigned getPrefetch() const {
618 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 618, __PRETTY_FUNCTION__))
;
619 return Prefetch.Val;
620 }
621
622 unsigned getPSBHint() const {
623 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 623, __PRETTY_FUNCTION__))
;
624 return PSBHint.Val;
625 }
626
627 StringRef getPSBHintName() const {
628 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 628, __PRETTY_FUNCTION__))
;
629 return StringRef(PSBHint.Data, PSBHint.Length);
630 }
631
632 unsigned getBTIHint() const {
633 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 633, __PRETTY_FUNCTION__))
;
634 return BTIHint.Val;
635 }
636
637 StringRef getBTIHintName() const {
638 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 638, __PRETTY_FUNCTION__))
;
639 return StringRef(BTIHint.Data, BTIHint.Length);
640 }
641
642 StringRef getPrefetchName() const {
643 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 643, __PRETTY_FUNCTION__))
;
644 return StringRef(Prefetch.Data, Prefetch.Length);
645 }
646
647 AArch64_AM::ShiftExtendType getShiftExtendType() const {
648 if (Kind == k_ShiftExtend)
649 return ShiftExtend.Type;
650 if (Kind == k_Register)
651 return Reg.ShiftExtend.Type;
652 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 652)
;
653 }
654
655 unsigned getShiftExtendAmount() const {
656 if (Kind == k_ShiftExtend)
657 return ShiftExtend.Amount;
658 if (Kind == k_Register)
659 return Reg.ShiftExtend.Amount;
660 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 660)
;
661 }
662
663 bool hasShiftExtendAmount() const {
664 if (Kind == k_ShiftExtend)
665 return ShiftExtend.HasExplicitAmount;
666 if (Kind == k_Register)
667 return Reg.ShiftExtend.HasExplicitAmount;
668 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 668)
;
669 }
670
671 bool isImm() const override { return Kind == k_Immediate; }
672 bool isMem() const override { return false; }
673
674 bool isUImm6() const {
675 if (!isImm())
676 return false;
677 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
678 if (!MCE)
679 return false;
680 int64_t Val = MCE->getValue();
681 return (Val >= 0 && Val < 64);
682 }
683
684 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
685
686 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
687 return isImmScaled<Bits, Scale>(true);
688 }
689
690 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
691 return isImmScaled<Bits, Scale>(false);
692 }
693
694 template <int Bits, int Scale>
695 DiagnosticPredicate isImmScaled(bool Signed) const {
696 if (!isImm())
697 return DiagnosticPredicateTy::NoMatch;
698
699 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
700 if (!MCE)
701 return DiagnosticPredicateTy::NoMatch;
702
703 int64_t MinVal, MaxVal;
704 if (Signed) {
705 int64_t Shift = Bits - 1;
706 MinVal = (int64_t(1) << Shift) * -Scale;
707 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
708 } else {
709 MinVal = 0;
710 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
711 }
712
713 int64_t Val = MCE->getValue();
714 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
715 return DiagnosticPredicateTy::Match;
716
717 return DiagnosticPredicateTy::NearMatch;
718 }
719
720 DiagnosticPredicate isSVEPattern() const {
721 if (!isImm())
722 return DiagnosticPredicateTy::NoMatch;
723 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
724 if (!MCE)
725 return DiagnosticPredicateTy::NoMatch;
726 int64_t Val = MCE->getValue();
727 if (Val >= 0 && Val < 32)
728 return DiagnosticPredicateTy::Match;
729 return DiagnosticPredicateTy::NearMatch;
730 }
731
732 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
733 AArch64MCExpr::VariantKind ELFRefKind;
734 MCSymbolRefExpr::VariantKind DarwinRefKind;
735 int64_t Addend;
736 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
737 Addend)) {
738 // If we don't understand the expression, assume the best and
739 // let the fixup and relocation code deal with it.
740 return true;
741 }
742
743 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
744 ELFRefKind == AArch64MCExpr::VK_LO12 ||
745 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
746 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
747 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
748 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
749 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
750 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
751 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
752 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
753 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
754 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
755 // Note that we don't range-check the addend. It's adjusted modulo page
756 // size when converted, so there is no "out of range" condition when using
757 // @pageoff.
758 return true;
759 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
760 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
761 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
762 return Addend == 0;
763 }
764
765 return false;
766 }
767
768 template <int Scale> bool isUImm12Offset() const {
769 if (!isImm())
770 return false;
771
772 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
773 if (!MCE)
774 return isSymbolicUImm12Offset(getImm());
775
776 int64_t Val = MCE->getValue();
777 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
778 }
779
780 template <int N, int M>
781 bool isImmInRange() const {
782 if (!isImm())
783 return false;
784 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
785 if (!MCE)
786 return false;
787 int64_t Val = MCE->getValue();
788 return (Val >= N && Val <= M);
789 }
790
791 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
792 // a logical immediate can always be represented when inverted.
793 template <typename T>
794 bool isLogicalImm() const {
795 if (!isImm())
796 return false;
797 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
798 if (!MCE)
799 return false;
800
801 int64_t Val = MCE->getValue();
802 // Avoid left shift by 64 directly.
803 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
804 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
805 if ((Val & Upper) && (Val & Upper) != Upper)
806 return false;
807
808 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
809 }
810
811 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
812
813 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
814 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
815 /// immediate that can be shifted by 'Shift'.
816 template <unsigned Width>
817 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
818 if (isShiftedImm() && Width == getShiftedImmShift())
819 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
820 return std::make_pair(CE->getValue(), Width);
821
822 if (isImm())
823 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
824 int64_t Val = CE->getValue();
825 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
826 return std::make_pair(Val >> Width, Width);
827 else
828 return std::make_pair(Val, 0u);
829 }
830
831 return {};
832 }
833
834 bool isAddSubImm() const {
835 if (!isShiftedImm() && !isImm())
836 return false;
837
838 const MCExpr *Expr;
839
840 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
841 if (isShiftedImm()) {
842 unsigned Shift = ShiftedImm.ShiftAmount;
843 Expr = ShiftedImm.Val;
844 if (Shift != 0 && Shift != 12)
845 return false;
846 } else {
847 Expr = getImm();
848 }
849
850 AArch64MCExpr::VariantKind ELFRefKind;
851 MCSymbolRefExpr::VariantKind DarwinRefKind;
852 int64_t Addend;
853 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
854 DarwinRefKind, Addend)) {
855 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
856 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
857 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
858 || ELFRefKind == AArch64MCExpr::VK_LO12
859 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
860 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
861 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
862 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
863 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
864 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
865 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
866 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
867 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
868 }
869
870 // If it's a constant, it should be a real immediate in range.
871 if (auto ShiftedVal = getShiftedVal<12>())
872 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
873
874 // If it's an expression, we hope for the best and let the fixup/relocation
875 // code deal with it.
876 return true;
877 }
878
879 bool isAddSubImmNeg() const {
880 if (!isShiftedImm() && !isImm())
881 return false;
882
883 // Otherwise it should be a real negative immediate in range.
884 if (auto ShiftedVal = getShiftedVal<12>())
885 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
886
887 return false;
888 }
889
890 // Signed value in the range -128 to +127. For element widths of
891 // 16 bits or higher it may also be a signed multiple of 256 in the
892 // range -32768 to +32512.
893 // For element-width of 8 bits a range of -128 to 255 is accepted,
894 // since a copy of a byte can be either signed/unsigned.
895 template <typename T>
896 DiagnosticPredicate isSVECpyImm() const {
897 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
898 return DiagnosticPredicateTy::NoMatch;
899
900 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
901 std::is_same<int8_t, T>::value;
902 if (auto ShiftedImm = getShiftedVal<8>())
903 if (!(IsByte && ShiftedImm->second) &&
904 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
905 << ShiftedImm->second))
906 return DiagnosticPredicateTy::Match;
907
908 return DiagnosticPredicateTy::NearMatch;
909 }
910
911 // Unsigned value in the range 0 to 255. For element widths of
912 // 16 bits or higher it may also be a signed multiple of 256 in the
913 // range 0 to 65280.
914 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
915 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
916 return DiagnosticPredicateTy::NoMatch;
917
918 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
919 std::is_same<int8_t, T>::value;
920 if (auto ShiftedImm = getShiftedVal<8>())
921 if (!(IsByte && ShiftedImm->second) &&
922 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
923 << ShiftedImm->second))
924 return DiagnosticPredicateTy::Match;
925
926 return DiagnosticPredicateTy::NearMatch;
927 }
928
929 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
930 if (isLogicalImm<T>() && !isSVECpyImm<T>())
931 return DiagnosticPredicateTy::Match;
932 return DiagnosticPredicateTy::NoMatch;
933 }
934
935 bool isCondCode() const { return Kind == k_CondCode; }
936
937 bool isSIMDImmType10() const {
938 if (!isImm())
939 return false;
940 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
941 if (!MCE)
942 return false;
943 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
944 }
945
946 template<int N>
947 bool isBranchTarget() const {
948 if (!isImm())
949 return false;
950 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
951 if (!MCE)
952 return true;
953 int64_t Val = MCE->getValue();
954 if (Val & 0x3)
955 return false;
956 assert(N > 0 && "Branch target immediate cannot be 0 bits!")((N > 0 && "Branch target immediate cannot be 0 bits!"
) ? static_cast<void> (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 956, __PRETTY_FUNCTION__))
;
957 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
958 }
959
960 bool
961 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
962 if (!isImm())
963 return false;
964
965 AArch64MCExpr::VariantKind ELFRefKind;
966 MCSymbolRefExpr::VariantKind DarwinRefKind;
967 int64_t Addend;
968 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
969 DarwinRefKind, Addend)) {
970 return false;
971 }
972 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
973 return false;
974
975 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
976 if (ELFRefKind == AllowedModifiers[i])
977 return true;
978 }
979
980 return false;
981 }
982
983 bool isMovWSymbolG3() const {
984 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
985 }
986
987 bool isMovWSymbolG2() const {
988 return isMovWSymbol(
989 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
990 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
991 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
992 AArch64MCExpr::VK_DTPREL_G2});
993 }
994
995 bool isMovWSymbolG1() const {
996 return isMovWSymbol(
997 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
998 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
999 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1000 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1001 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1002 }
1003
1004 bool isMovWSymbolG0() const {
1005 return isMovWSymbol(
1006 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1007 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1008 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1009 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1010 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1011 }
1012
1013 template<int RegWidth, int Shift>
1014 bool isMOVZMovAlias() const {
1015 if (!isImm()) return false;
1016
1017 const MCExpr *E = getImm();
1018 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1019 uint64_t Value = CE->getValue();
1020
1021 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1022 }
1023 // Only supports the case of Shift being 0 if an expression is used as an
1024 // operand
1025 return !Shift && E;
1026 }
1027
1028 template<int RegWidth, int Shift>
1029 bool isMOVNMovAlias() const {
1030 if (!isImm()) return false;
1031
1032 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1033 if (!CE) return false;
1034 uint64_t Value = CE->getValue();
1035
1036 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1037 }
1038
1039 bool isFPImm() const {
1040 return Kind == k_FPImm &&
1041 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1042 }
1043
1044 bool isBarrier() const {
1045 return Kind == k_Barrier && !getBarriernXSModifier();
1046 }
1047 bool isBarriernXS() const {
1048 return Kind == k_Barrier && getBarriernXSModifier();
1049 }
1050 bool isSysReg() const { return Kind == k_SysReg; }
1051
1052 bool isMRSSystemRegister() const {
1053 if (!isSysReg()) return false;
1054
1055 return SysReg.MRSReg != -1U;
1056 }
1057
1058 bool isMSRSystemRegister() const {
1059 if (!isSysReg()) return false;
1060 return SysReg.MSRReg != -1U;
1061 }
1062
1063 bool isSystemPStateFieldWithImm0_1() const {
1064 if (!isSysReg()) return false;
1065 return (SysReg.PStateField == AArch64PState::PAN ||
1066 SysReg.PStateField == AArch64PState::DIT ||
1067 SysReg.PStateField == AArch64PState::UAO ||
1068 SysReg.PStateField == AArch64PState::SSBS);
1069 }
1070
1071 bool isSystemPStateFieldWithImm0_15() const {
1072 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1073 return SysReg.PStateField != -1U;
1074 }
1075
1076 bool isReg() const override {
1077 return Kind == k_Register;
1078 }
1079
1080 bool isScalarReg() const {
1081 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1082 }
1083
1084 bool isNeonVectorReg() const {
1085 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1086 }
1087
1088 bool isNeonVectorRegLo() const {
1089 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1090 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1091 Reg.RegNum) ||
1092 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1093 Reg.RegNum));
1094 }
1095
1096 template <unsigned Class> bool isSVEVectorReg() const {
1097 RegKind RK;
1098 switch (Class) {
1099 case AArch64::ZPRRegClassID:
1100 case AArch64::ZPR_3bRegClassID:
1101 case AArch64::ZPR_4bRegClassID:
1102 RK = RegKind::SVEDataVector;
1103 break;
1104 case AArch64::PPRRegClassID:
1105 case AArch64::PPR_3bRegClassID:
1106 RK = RegKind::SVEPredicateVector;
1107 break;
1108 default:
1109 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1109)
;
1110 }
1111
1112 return (Kind == k_Register && Reg.Kind == RK) &&
1113 AArch64MCRegisterClasses[Class].contains(getReg());
1114 }
1115
1116 template <unsigned Class> bool isFPRasZPR() const {
1117 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1118 AArch64MCRegisterClasses[Class].contains(getReg());
1119 }
1120
1121 template <int ElementWidth, unsigned Class>
1122 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1123 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1124 return DiagnosticPredicateTy::NoMatch;
1125
1126 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1127 return DiagnosticPredicateTy::Match;
1128
1129 return DiagnosticPredicateTy::NearMatch;
1130 }
1131
1132 template <int ElementWidth, unsigned Class>
1133 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1134 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1135 return DiagnosticPredicateTy::NoMatch;
1136
1137 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1138 return DiagnosticPredicateTy::Match;
1139
1140 return DiagnosticPredicateTy::NearMatch;
1141 }
1142
1143 template <int ElementWidth, unsigned Class,
1144 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1145 bool ShiftWidthAlwaysSame>
1146 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1147 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1148 if (!VectorMatch.isMatch())
1149 return DiagnosticPredicateTy::NoMatch;
1150
1151 // Give a more specific diagnostic when the user has explicitly typed in
1152 // a shift-amount that does not match what is expected, but for which
1153 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1154 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1155 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1156 ShiftExtendTy == AArch64_AM::SXTW) &&
1157 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1158 return DiagnosticPredicateTy::NoMatch;
1159
1160 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1161 return DiagnosticPredicateTy::Match;
1162
1163 return DiagnosticPredicateTy::NearMatch;
1164 }
1165
1166 bool isGPR32as64() const {
1167 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1168 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1169 }
1170
1171 bool isGPR64as32() const {
1172 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1173 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1174 }
1175
1176 bool isGPR64x8() const {
1177 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1178 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1179 Reg.RegNum);
1180 }
1181
1182 bool isWSeqPair() const {
1183 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1184 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1185 Reg.RegNum);
1186 }
1187
1188 bool isXSeqPair() const {
1189 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1190 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1191 Reg.RegNum);
1192 }
1193
1194 template<int64_t Angle, int64_t Remainder>
1195 DiagnosticPredicate isComplexRotation() const {
1196 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1197
1198 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1199 if (!CE) return DiagnosticPredicateTy::NoMatch;
1200 uint64_t Value = CE->getValue();
1201
1202 if (Value % Angle == Remainder && Value <= 270)
1203 return DiagnosticPredicateTy::Match;
1204 return DiagnosticPredicateTy::NearMatch;
1205 }
1206
1207 template <unsigned RegClassID> bool isGPR64() const {
1208 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1209 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1210 }
1211
1212 template <unsigned RegClassID, int ExtWidth>
1213 DiagnosticPredicate isGPR64WithShiftExtend() const {
1214 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1215 return DiagnosticPredicateTy::NoMatch;
1216
1217 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1218 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1219 return DiagnosticPredicateTy::Match;
1220 return DiagnosticPredicateTy::NearMatch;
1221 }
1222
1223 /// Is this a vector list with the type implicit (presumably attached to the
1224 /// instruction itself)?
1225 template <RegKind VectorKind, unsigned NumRegs>
1226 bool isImplicitlyTypedVectorList() const {
1227 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1228 VectorList.NumElements == 0 &&
1229 VectorList.RegisterKind == VectorKind;
1230 }
1231
1232 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1233 unsigned ElementWidth>
1234 bool isTypedVectorList() const {
1235 if (Kind != k_VectorList)
1236 return false;
1237 if (VectorList.Count != NumRegs)
1238 return false;
1239 if (VectorList.RegisterKind != VectorKind)
1240 return false;
1241 if (VectorList.ElementWidth != ElementWidth)
1242 return false;
1243 return VectorList.NumElements == NumElements;
1244 }
1245
1246 template <int Min, int Max>
1247 DiagnosticPredicate isVectorIndex() const {
1248 if (Kind != k_VectorIndex)
1249 return DiagnosticPredicateTy::NoMatch;
1250 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1251 return DiagnosticPredicateTy::Match;
1252 return DiagnosticPredicateTy::NearMatch;
1253 }
1254
1255 bool isToken() const override { return Kind == k_Token; }
1256
1257 bool isTokenEqual(StringRef Str) const {
1258 return Kind == k_Token && getToken() == Str;
1259 }
1260 bool isSysCR() const { return Kind == k_SysCR; }
1261 bool isPrefetch() const { return Kind == k_Prefetch; }
1262 bool isPSBHint() const { return Kind == k_PSBHint; }
1263 bool isBTIHint() const { return Kind == k_BTIHint; }
1264 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1265 bool isShifter() const {
1266 if (!isShiftExtend())
1267 return false;
1268
1269 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1270 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1271 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1272 ST == AArch64_AM::MSL);
1273 }
1274
1275 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1276 if (Kind != k_FPImm)
1277 return DiagnosticPredicateTy::NoMatch;
1278
1279 if (getFPImmIsExact()) {
1280 // Lookup the immediate from table of supported immediates.
1281 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1282 assert(Desc && "Unknown enum value")((Desc && "Unknown enum value") ? static_cast<void
> (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1282, __PRETTY_FUNCTION__))
;
1283
1284 // Calculate its FP value.
1285 APFloat RealVal(APFloat::IEEEdouble());
1286 auto StatusOrErr =
1287 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1288 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1289 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1289)
;
1290
1291 if (getFPImm().bitwiseIsEqual(RealVal))
1292 return DiagnosticPredicateTy::Match;
1293 }
1294
1295 return DiagnosticPredicateTy::NearMatch;
1296 }
1297
1298 template <unsigned ImmA, unsigned ImmB>
1299 DiagnosticPredicate isExactFPImm() const {
1300 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1301 if ((Res = isExactFPImm<ImmA>()))
1302 return DiagnosticPredicateTy::Match;
1303 if ((Res = isExactFPImm<ImmB>()))
1304 return DiagnosticPredicateTy::Match;
1305 return Res;
1306 }
1307
1308 bool isExtend() const {
1309 if (!isShiftExtend())
1310 return false;
1311
1312 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1313 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1314 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1315 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1316 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1317 ET == AArch64_AM::LSL) &&
1318 getShiftExtendAmount() <= 4;
1319 }
1320
1321 bool isExtend64() const {
1322 if (!isExtend())
1323 return false;
1324 // Make sure the extend expects a 32-bit source register.
1325 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1326 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1327 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1328 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1329 }
1330
1331 bool isExtendLSL64() const {
1332 if (!isExtend())
1333 return false;
1334 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1335 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1336 ET == AArch64_AM::LSL) &&
1337 getShiftExtendAmount() <= 4;
1338 }
1339
1340 template<int Width> bool isMemXExtend() const {
1341 if (!isExtend())
1342 return false;
1343 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1344 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1345 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1346 getShiftExtendAmount() == 0);
1347 }
1348
1349 template<int Width> bool isMemWExtend() const {
1350 if (!isExtend())
1351 return false;
1352 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1353 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1354 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1355 getShiftExtendAmount() == 0);
1356 }
1357
1358 template <unsigned width>
1359 bool isArithmeticShifter() const {
1360 if (!isShifter())
1361 return false;
1362
1363 // An arithmetic shifter is LSL, LSR, or ASR.
1364 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1365 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1366 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1367 }
1368
1369 template <unsigned width>
1370 bool isLogicalShifter() const {
1371 if (!isShifter())
1372 return false;
1373
1374 // A logical shifter is LSL, LSR, ASR or ROR.
1375 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1376 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1377 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1378 getShiftExtendAmount() < width;
1379 }
1380
1381 bool isMovImm32Shifter() const {
1382 if (!isShifter())
1383 return false;
1384
1385 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1386 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1387 if (ST != AArch64_AM::LSL)
1388 return false;
1389 uint64_t Val = getShiftExtendAmount();
1390 return (Val == 0 || Val == 16);
1391 }
1392
1393 bool isMovImm64Shifter() const {
1394 if (!isShifter())
1395 return false;
1396
1397 // A MOVi shifter is LSL of 0 or 16.
1398 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1399 if (ST != AArch64_AM::LSL)
1400 return false;
1401 uint64_t Val = getShiftExtendAmount();
1402 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1403 }
1404
1405 bool isLogicalVecShifter() const {
1406 if (!isShifter())
1407 return false;
1408
1409 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1410 unsigned Shift = getShiftExtendAmount();
1411 return getShiftExtendType() == AArch64_AM::LSL &&
1412 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1413 }
1414
1415 bool isLogicalVecHalfWordShifter() const {
1416 if (!isLogicalVecShifter())
1417 return false;
1418
1419 // A logical vector shifter is a left shift by 0 or 8.
1420 unsigned Shift = getShiftExtendAmount();
1421 return getShiftExtendType() == AArch64_AM::LSL &&
1422 (Shift == 0 || Shift == 8);
1423 }
1424
1425 bool isMoveVecShifter() const {
1426 if (!isShiftExtend())
1427 return false;
1428
1429 // A logical vector shifter is a left shift by 8 or 16.
1430 unsigned Shift = getShiftExtendAmount();
1431 return getShiftExtendType() == AArch64_AM::MSL &&
1432 (Shift == 8 || Shift == 16);
1433 }
1434
1435 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1436 // to LDUR/STUR when the offset is not legal for the former but is for
1437 // the latter. As such, in addition to checking for being a legal unscaled
1438 // address, also check that it is not a legal scaled address. This avoids
1439 // ambiguity in the matcher.
1440 template<int Width>
1441 bool isSImm9OffsetFB() const {
1442 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1443 }
1444
1445 bool isAdrpLabel() const {
1446 // Validation was handled during parsing, so we just sanity check that
1447 // something didn't go haywire.
1448 if (!isImm())
1449 return false;
1450
1451 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1452 int64_t Val = CE->getValue();
1453 int64_t Min = - (4096 * (1LL << (21 - 1)));
1454 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1455 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1456 }
1457
1458 return true;
1459 }
1460
1461 bool isAdrLabel() const {
1462 // Validation was handled during parsing, so we just sanity check that
1463 // something didn't go haywire.
1464 if (!isImm())
1465 return false;
1466
1467 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1468 int64_t Val = CE->getValue();
1469 int64_t Min = - (1LL << (21 - 1));
1470 int64_t Max = ((1LL << (21 - 1)) - 1);
1471 return Val >= Min && Val <= Max;
1472 }
1473
1474 return true;
1475 }
1476
1477 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1478 // Add as immediates when possible. Null MCExpr = 0.
1479 if (!Expr)
1480 Inst.addOperand(MCOperand::createImm(0));
1481 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1482 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1483 else
1484 Inst.addOperand(MCOperand::createExpr(Expr));
1485 }
1486
1487 void addRegOperands(MCInst &Inst, unsigned N) const {
1488 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1488, __PRETTY_FUNCTION__))
;
1489 Inst.addOperand(MCOperand::createReg(getReg()));
1490 }
1491
1492 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1493 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1493, __PRETTY_FUNCTION__))
;
1494 assert(((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1495, __PRETTY_FUNCTION__))
1495 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1495, __PRETTY_FUNCTION__))
;
1496
1497 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1498 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1499 RI->getEncodingValue(getReg()));
1500
1501 Inst.addOperand(MCOperand::createReg(Reg));
1502 }
1503
1504 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1505 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1505, __PRETTY_FUNCTION__))
;
1506 assert(((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1507, __PRETTY_FUNCTION__))
1507 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1507, __PRETTY_FUNCTION__))
;
1508
1509 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1510 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1511 RI->getEncodingValue(getReg()));
1512
1513 Inst.addOperand(MCOperand::createReg(Reg));
1514 }
1515
1516 template <int Width>
1517 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1518 unsigned Base;
1519 switch (Width) {
1520 case 8: Base = AArch64::B0; break;
1521 case 16: Base = AArch64::H0; break;
1522 case 32: Base = AArch64::S0; break;
1523 case 64: Base = AArch64::D0; break;
1524 case 128: Base = AArch64::Q0; break;
1525 default:
1526 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1526)
;
1527 }
1528 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1529 }
1530
1531 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1532 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1532, __PRETTY_FUNCTION__))
;
1533 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1534, __PRETTY_FUNCTION__))
1534 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1534, __PRETTY_FUNCTION__))
;
1535 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1536 }
1537
1538 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1539 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1539, __PRETTY_FUNCTION__))
;
1540 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1541, __PRETTY_FUNCTION__))
1541 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1541, __PRETTY_FUNCTION__))
;
1542 Inst.addOperand(MCOperand::createReg(getReg()));
1543 }
1544
1545 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1546 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1546, __PRETTY_FUNCTION__))
;
1547 Inst.addOperand(MCOperand::createReg(getReg()));
1548 }
1549
  // Row selector for the register-list lookup table in addVectorListOperands:
  // which register file a vector list is drawn from.
  enum VecListIndexType {
    VecListIdx_DReg = 0, // 64-bit NEON D registers
    VecListIdx_QReg = 1, // 128-bit NEON Q registers
    VecListIdx_ZReg = 2, // SVE Z registers
  };
1555
1556 template <VecListIndexType RegTy, unsigned NumRegs>
1557 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1558 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1558, __PRETTY_FUNCTION__))
;
1559 static const unsigned FirstRegs[][5] = {
1560 /* DReg */ { AArch64::Q0,
1561 AArch64::D0, AArch64::D0_D1,
1562 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1563 /* QReg */ { AArch64::Q0,
1564 AArch64::Q0, AArch64::Q0_Q1,
1565 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1566 /* ZReg */ { AArch64::Z0,
1567 AArch64::Z0, AArch64::Z0_Z1,
1568 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1569 };
1570
1571 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1572, __PRETTY_FUNCTION__))
1572 " NumRegs must be <= 4 for ZRegs")(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1572, __PRETTY_FUNCTION__))
;
1573
1574 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1575 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1576 FirstRegs[(unsigned)RegTy][0]));
1577 }
1578
1579 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1580 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1580, __PRETTY_FUNCTION__))
;
1581 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1582 }
1583
1584 template <unsigned ImmIs0, unsigned ImmIs1>
1585 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1586 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1586, __PRETTY_FUNCTION__))
;
1587 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")((bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand"
) ? static_cast<void> (0) : __assert_fail ("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1587, __PRETTY_FUNCTION__))
;
1588 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1589 }
1590
1591 void addImmOperands(MCInst &Inst, unsigned N) const {
1592 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1592, __PRETTY_FUNCTION__))
;
1593 // If this is a pageoff symrefexpr with an addend, adjust the addend
1594 // to be only the page-offset portion. Otherwise, just add the expr
1595 // as-is.
1596 addExpr(Inst, getImm());
1597 }
1598
1599 template <int Shift>
1600 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1601 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1601, __PRETTY_FUNCTION__))
;
1602 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1603 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1604 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1605 } else if (isShiftedImm()) {
1606 addExpr(Inst, getShiftedImmVal());
1607 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1608 } else {
1609 addExpr(Inst, getImm());
1610 Inst.addOperand(MCOperand::createImm(0));
1611 }
1612 }
1613
1614 template <int Shift>
1615 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1616 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1616, __PRETTY_FUNCTION__))
;
1617 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1618 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1619 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1620 } else
1621 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1621)
;
1622 }
1623
1624 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1625 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1625, __PRETTY_FUNCTION__))
;
1626 Inst.addOperand(MCOperand::createImm(getCondCode()));
1627 }
1628
1629 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1630 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1630, __PRETTY_FUNCTION__))
;
1631 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1632 if (!MCE)
1633 addExpr(Inst, getImm());
1634 else
1635 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1636 }
1637
  // ADR labels need no adjustment (unlike ADRP's page shift); forward to the
  // generic immediate emitter.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1641
1642 template<int Scale>
1643 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1644 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1644, __PRETTY_FUNCTION__))
;
1645 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1646
1647 if (!MCE) {
1648 Inst.addOperand(MCOperand::createExpr(getImm()));
1649 return;
1650 }
1651 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1652 }
1653
1654 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1655 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1655, __PRETTY_FUNCTION__))
;
1656 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1657 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1658 }
1659
1660 template <int Scale>
1661 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1662 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1662, __PRETTY_FUNCTION__))
;
1663 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1664 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1665 }
1666
1667 template <typename T>
1668 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1669 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1669, __PRETTY_FUNCTION__))
;
1670 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1671 std::make_unsigned_t<T> Val = MCE->getValue();
1672 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1673 Inst.addOperand(MCOperand::createImm(encoding));
1674 }
1675
1676 template <typename T>
1677 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1678 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1678, __PRETTY_FUNCTION__))
;
1679 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1680 std::make_unsigned_t<T> Val = ~MCE->getValue();
1681 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1682 Inst.addOperand(MCOperand::createImm(encoding));
1683 }
1684
1685 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1686 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1686, __PRETTY_FUNCTION__))
;
1687 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1688 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1689 Inst.addOperand(MCOperand::createImm(encoding));
1690 }
1691
1692 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1693 // Branch operands don't encode the low bits, so shift them off
1694 // here. If it's a label, however, just put it on directly as there's
1695 // not enough information now to do anything.
1696 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1696, __PRETTY_FUNCTION__))
;
1697 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1698 if (!MCE) {
1699 addExpr(Inst, getImm());
1700 return;
1701 }
1702 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1702, __PRETTY_FUNCTION__))
;
1703 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1704 }
1705
1706 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1707 // Branch operands don't encode the low bits, so shift them off
1708 // here. If it's a label, however, just put it on directly as there's
1709 // not enough information now to do anything.
1710 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1710, __PRETTY_FUNCTION__))
;
1711 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1712 if (!MCE) {
1713 addExpr(Inst, getImm());
1714 return;
1715 }
1716 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1716, __PRETTY_FUNCTION__))
;
1717 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1718 }
1719
1720 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1721 // Branch operands don't encode the low bits, so shift them off
1722 // here. If it's a label, however, just put it on directly as there's
1723 // not enough information now to do anything.
1724 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1724, __PRETTY_FUNCTION__))
;
1725 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1726 if (!MCE) {
1727 addExpr(Inst, getImm());
1728 return;
1729 }
1730 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1730, __PRETTY_FUNCTION__))
;
1731 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1732 }
1733
1734 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1735 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1735, __PRETTY_FUNCTION__))
;
1736 Inst.addOperand(MCOperand::createImm(
1737 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1738 }
1739
1740 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1741 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1741, __PRETTY_FUNCTION__))
;
1742 Inst.addOperand(MCOperand::createImm(getBarrier()));
1743 }
1744
1745 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1746 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1746, __PRETTY_FUNCTION__))
;
1747 Inst.addOperand(MCOperand::createImm(getBarrier()));
1748 }
1749
1750 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1751 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1751, __PRETTY_FUNCTION__))
;
1752
1753 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1754 }
1755
1756 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1757 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1757, __PRETTY_FUNCTION__))
;
1758
1759 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1760 }
1761
1762 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1763 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1763, __PRETTY_FUNCTION__))
;
1764
1765 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1766 }
1767
1768 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1769 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1769, __PRETTY_FUNCTION__))
;
1770
1771 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1772 }
1773
1774 void addSysCROperands(MCInst &Inst, unsigned N) const {
1775 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1775, __PRETTY_FUNCTION__))
;
1776 Inst.addOperand(MCOperand::createImm(getSysCR()));
1777 }
1778
1779 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1780 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1780, __PRETTY_FUNCTION__))
;
1781 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1782 }
1783
1784 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1785 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1785, __PRETTY_FUNCTION__))
;
1786 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1787 }
1788
1789 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1790 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1790, __PRETTY_FUNCTION__))
;
1791 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1792 }
1793
1794 void addShifterOperands(MCInst &Inst, unsigned N) const {
1795 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1795, __PRETTY_FUNCTION__))
;
1796 unsigned Imm =
1797 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1798 Inst.addOperand(MCOperand::createImm(Imm));
1799 }
1800
1801 void addExtendOperands(MCInst &Inst, unsigned N) const {
1802 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1802, __PRETTY_FUNCTION__))
;
1803 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1804 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1805 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1806 Inst.addOperand(MCOperand::createImm(Imm));
1807 }
1808
1809 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1810 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1810, __PRETTY_FUNCTION__))
;
1811 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1812 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1813 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1814 Inst.addOperand(MCOperand::createImm(Imm));
1815 }
1816
1817 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1818 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1818, __PRETTY_FUNCTION__))
;
1819 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1820 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1821 Inst.addOperand(MCOperand::createImm(IsSigned));
1822 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1823 }
1824
1825 // For 8-bit load/store instructions with a register offset, both the
1826 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1827 // they're disambiguated by whether the shift was explicit or implicit rather
1828 // than its size.
1829 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1830 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1830, __PRETTY_FUNCTION__))
;
1831 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1832 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1833 Inst.addOperand(MCOperand::createImm(IsSigned));
1834 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1835 }
1836
1837 template<int Shift>
1838 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1839 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1839, __PRETTY_FUNCTION__))
;
1840
1841 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1842 if (CE) {
1843 uint64_t Value = CE->getValue();
1844 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1845 } else {
1846 addExpr(Inst, getImm());
1847 }
1848 }
1849
1850 template<int Shift>
1851 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1852 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1852, __PRETTY_FUNCTION__))
;
1853
1854 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1855 uint64_t Value = CE->getValue();
1856 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1857 }
1858
1859 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1860 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1860, __PRETTY_FUNCTION__))
;
1861 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1862 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1863 }
1864
1865 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1866, __PRETTY_FUNCTION__))
;
1867 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1868 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1869 }
1870
  /// Debug-print this operand to \p OS (defined out-of-line).
  void print(raw_ostream &OS) const override;
1873 static std::unique_ptr<AArch64Operand>
1874 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1875 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1876 Op->Tok.Data = Str.data();
1877 Op->Tok.Length = Str.size();
1878 Op->Tok.IsSuffix = IsSuffix;
1879 Op->StartLoc = S;
1880 Op->EndLoc = S;
1881 return Op;
1882 }
1883
1884 static std::unique_ptr<AArch64Operand>
1885 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1886 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1887 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1888 unsigned ShiftAmount = 0,
1889 unsigned HasExplicitAmount = false) {
1890 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1891 Op->Reg.RegNum = RegNum;
1892 Op->Reg.Kind = Kind;
1893 Op->Reg.ElementWidth = 0;
1894 Op->Reg.EqualityTy = EqTy;
1895 Op->Reg.ShiftExtend.Type = ExtTy;
1896 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1897 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1898 Op->StartLoc = S;
1899 Op->EndLoc = E;
1900 return Op;
1901 }
1902
1903 static std::unique_ptr<AArch64Operand>
1904 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1905 SMLoc S, SMLoc E, MCContext &Ctx,
1906 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1907 unsigned ShiftAmount = 0,
1908 unsigned HasExplicitAmount = false) {
1909 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1911, __PRETTY_FUNCTION__))
1910 Kind == RegKind::SVEPredicateVector) &&(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1911, __PRETTY_FUNCTION__))
1911 "Invalid vector kind")(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1911, __PRETTY_FUNCTION__))
;
1912 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1913 HasExplicitAmount);
1914 Op->Reg.ElementWidth = ElementWidth;
1915 return Op;
1916 }
1917
1918 static std::unique_ptr<AArch64Operand>
1919 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1920 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1921 MCContext &Ctx) {
1922 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1923 Op->VectorList.RegNum = RegNum;
1924 Op->VectorList.Count = Count;
1925 Op->VectorList.NumElements = NumElements;
1926 Op->VectorList.ElementWidth = ElementWidth;
1927 Op->VectorList.RegisterKind = RegisterKind;
1928 Op->StartLoc = S;
1929 Op->EndLoc = E;
1930 return Op;
1931 }
1932
1933 static std::unique_ptr<AArch64Operand>
1934 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1935 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1936 Op->VectorIndex.Val = Idx;
1937 Op->StartLoc = S;
1938 Op->EndLoc = E;
1939 return Op;
1940 }
1941
1942 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1943 SMLoc E, MCContext &Ctx) {
1944 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1945 Op->Imm.Val = Val;
1946 Op->StartLoc = S;
1947 Op->EndLoc = E;
1948 return Op;
1949 }
1950
1951 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1952 unsigned ShiftAmount,
1953 SMLoc S, SMLoc E,
1954 MCContext &Ctx) {
1955 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1956 Op->ShiftedImm .Val = Val;
1957 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1958 Op->StartLoc = S;
1959 Op->EndLoc = E;
1960 return Op;
1961 }
1962
1963 static std::unique_ptr<AArch64Operand>
1964 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1965 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
1966 Op->CondCode.Code = Code;
1967 Op->StartLoc = S;
1968 Op->EndLoc = E;
1969 return Op;
1970 }
1971
1972 static std::unique_ptr<AArch64Operand>
1973 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1974 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
1975 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1976 Op->FPImm.IsExact = IsExact;
1977 Op->StartLoc = S;
1978 Op->EndLoc = S;
1979 return Op;
1980 }
1981
1982 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1983 StringRef Str,
1984 SMLoc S,
1985 MCContext &Ctx,
1986 bool HasnXSModifier) {
1987 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
1988 Op->Barrier.Val = Val;
1989 Op->Barrier.Data = Str.data();
1990 Op->Barrier.Length = Str.size();
1991 Op->Barrier.HasnXSModifier = HasnXSModifier;
1992 Op->StartLoc = S;
1993 Op->EndLoc = S;
1994 return Op;
1995 }
1996
1997 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1998 uint32_t MRSReg,
1999 uint32_t MSRReg,
2000 uint32_t PStateField,
2001 MCContext &Ctx) {
2002 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2003 Op->SysReg.Data = Str.data();
2004 Op->SysReg.Length = Str.size();
2005 Op->SysReg.MRSReg = MRSReg;
2006 Op->SysReg.MSRReg = MSRReg;
2007 Op->SysReg.PStateField = PStateField;
2008 Op->StartLoc = S;
2009 Op->EndLoc = S;
2010 return Op;
2011 }
2012
2013 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2014 SMLoc E, MCContext &Ctx) {
2015 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2016 Op->SysCRImm.Val = Val;
2017 Op->StartLoc = S;
2018 Op->EndLoc = E;
2019 return Op;
2020 }
2021
2022 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2023 StringRef Str,
2024 SMLoc S,
2025 MCContext &Ctx) {
2026 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2027 Op->Prefetch.Val = Val;
2028 Op->Barrier.Data = Str.data();
2029 Op->Barrier.Length = Str.size();
2030 Op->StartLoc = S;
2031 Op->EndLoc = S;
2032 return Op;
2033 }
2034
2035 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2036 StringRef Str,
2037 SMLoc S,
2038 MCContext &Ctx) {
2039 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2040 Op->PSBHint.Val = Val;
2041 Op->PSBHint.Data = Str.data();
2042 Op->PSBHint.Length = Str.size();
2043 Op->StartLoc = S;
2044 Op->EndLoc = S;
2045 return Op;
2046 }
2047
2048 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2049 StringRef Str,
2050 SMLoc S,
2051 MCContext &Ctx) {
2052 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2053 Op->BTIHint.Val = Val << 1 | 32;
2054 Op->BTIHint.Data = Str.data();
2055 Op->BTIHint.Length = Str.size();
2056 Op->StartLoc = S;
2057 Op->EndLoc = S;
2058 return Op;
2059 }
2060
2061 static std::unique_ptr<AArch64Operand>
2062 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2063 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2064 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2065 Op->ShiftExtend.Type = ShOp;
2066 Op->ShiftExtend.Amount = Val;
2067 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2068 Op->StartLoc = S;
2069 Op->EndLoc = E;
2070 return Op;
2071 }
2072};
2073
2074} // end anonymous namespace.
2075
2076void AArch64Operand::print(raw_ostream &OS) const {
2077 switch (Kind) {
2078 case k_FPImm:
2079 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2080 if (!getFPImmIsExact())
2081 OS << " (inexact)";
2082 OS << ">";
2083 break;
2084 case k_Barrier: {
2085 StringRef Name = getBarrierName();
2086 if (!Name.empty())
2087 OS << "<barrier " << Name << ">";
2088 else
2089 OS << "<barrier invalid #" << getBarrier() << ">";
2090 break;
2091 }
2092 case k_Immediate:
2093 OS << *getImm();
2094 break;
2095 case k_ShiftedImm: {
2096 unsigned Shift = getShiftedImmShift();
2097 OS << "<shiftedimm ";
2098 OS << *getShiftedImmVal();
2099 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2100 break;
2101 }
2102 case k_CondCode:
2103 OS << "<condcode " << getCondCode() << ">";
2104 break;
2105 case k_VectorList: {
2106 OS << "<vectorlist ";
2107 unsigned Reg = getVectorListStart();
2108 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2109 OS << Reg + i << " ";
2110 OS << ">";
2111 break;
2112 }
2113 case k_VectorIndex:
2114 OS << "<vectorindex " << getVectorIndex() << ">";
2115 break;
2116 case k_SysReg:
2117 OS << "<sysreg: " << getSysReg() << '>';
2118 break;
2119 case k_Token:
2120 OS << "'" << getToken() << "'";
2121 break;
2122 case k_SysCR:
2123 OS << "c" << getSysCR();
2124 break;
2125 case k_Prefetch: {
2126 StringRef Name = getPrefetchName();
2127 if (!Name.empty())
2128 OS << "<prfop " << Name << ">";
2129 else
2130 OS << "<prfop invalid #" << getPrefetch() << ">";
2131 break;
2132 }
2133 case k_PSBHint:
2134 OS << getPSBHintName();
2135 break;
2136 case k_BTIHint:
2137 OS << getBTIHintName();
2138 break;
2139 case k_Register:
2140 OS << "<register " << getReg() << ">";
2141 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2142 break;
2143 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2144 case k_ShiftExtend:
2145 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2146 << getShiftExtendAmount();
2147 if (!hasShiftExtendAmount())
2148 OS << "<imp>";
2149 OS << '>';
2150 break;
2151 }
2152}
2153
2154/// @name Auto-generated Match Functions
2155/// {
2156
2157static unsigned MatchRegisterName(StringRef Name);
2158
2159/// }
2160
2161static unsigned MatchNeonVectorRegName(StringRef Name) {
2162 return StringSwitch<unsigned>(Name.lower())
2163 .Case("v0", AArch64::Q0)
2164 .Case("v1", AArch64::Q1)
2165 .Case("v2", AArch64::Q2)
2166 .Case("v3", AArch64::Q3)
2167 .Case("v4", AArch64::Q4)
2168 .Case("v5", AArch64::Q5)
2169 .Case("v6", AArch64::Q6)
2170 .Case("v7", AArch64::Q7)
2171 .Case("v8", AArch64::Q8)
2172 .Case("v9", AArch64::Q9)
2173 .Case("v10", AArch64::Q10)
2174 .Case("v11", AArch64::Q11)
2175 .Case("v12", AArch64::Q12)
2176 .Case("v13", AArch64::Q13)
2177 .Case("v14", AArch64::Q14)
2178 .Case("v15", AArch64::Q15)
2179 .Case("v16", AArch64::Q16)
2180 .Case("v17", AArch64::Q17)
2181 .Case("v18", AArch64::Q18)
2182 .Case("v19", AArch64::Q19)
2183 .Case("v20", AArch64::Q20)
2184 .Case("v21", AArch64::Q21)
2185 .Case("v22", AArch64::Q22)
2186 .Case("v23", AArch64::Q23)
2187 .Case("v24", AArch64::Q24)
2188 .Case("v25", AArch64::Q25)
2189 .Case("v26", AArch64::Q26)
2190 .Case("v27", AArch64::Q27)
2191 .Case("v28", AArch64::Q28)
2192 .Case("v29", AArch64::Q29)
2193 .Case("v30", AArch64::Q30)
2194 .Case("v31", AArch64::Q31)
2195 .Default(0);
2196}
2197
2198/// Returns an optional pair of (#elements, element-width) if Suffix
2199/// is a valid vector kind. Where the number of elements in a vector
2200/// or the vector width is implicit or explicitly unknown (but still a
2201/// valid suffix kind), 0 is used.
2202static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2203 RegKind VectorKind) {
2204 std::pair<int, int> Res = {-1, -1};
2205
2206 switch (VectorKind) {
2207 case RegKind::NeonVector:
2208 Res =
2209 StringSwitch<std::pair<int, int>>(Suffix.lower())
2210 .Case("", {0, 0})
2211 .Case(".1d", {1, 64})
2212 .Case(".1q", {1, 128})
2213 // '.2h' needed for fp16 scalar pairwise reductions
2214 .Case(".2h", {2, 16})
2215 .Case(".2s", {2, 32})
2216 .Case(".2d", {2, 64})
2217 // '.4b' is another special case for the ARMv8.2a dot product
2218 // operand
2219 .Case(".4b", {4, 8})
2220 .Case(".4h", {4, 16})
2221 .Case(".4s", {4, 32})
2222 .Case(".8b", {8, 8})
2223 .Case(".8h", {8, 16})
2224 .Case(".16b", {16, 8})
2225 // Accept the width neutral ones, too, for verbose syntax. If those
2226 // aren't used in the right places, the token operand won't match so
2227 // all will work out.
2228 .Case(".b", {0, 8})
2229 .Case(".h", {0, 16})
2230 .Case(".s", {0, 32})
2231 .Case(".d", {0, 64})
2232 .Default({-1, -1});
2233 break;
2234 case RegKind::SVEPredicateVector:
2235 case RegKind::SVEDataVector:
2236 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2237 .Case("", {0, 0})
2238 .Case(".b", {0, 8})
2239 .Case(".h", {0, 16})
2240 .Case(".s", {0, 32})
2241 .Case(".d", {0, 64})
2242 .Case(".q", {0, 128})
2243 .Default({-1, -1});
2244 break;
2245 default:
2246 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2246)
;
2247 }
2248
2249 if (Res == std::make_pair(-1, -1))
2250 return Optional<std::pair<int, int>>();
2251
2252 return Optional<std::pair<int, int>>(Res);
2253}
2254
2255static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2256 return parseVectorKind(Suffix, VectorKind).hasValue();
2257}
2258
2259static unsigned matchSVEDataVectorRegName(StringRef Name) {
2260 return StringSwitch<unsigned>(Name.lower())
2261 .Case("z0", AArch64::Z0)
2262 .Case("z1", AArch64::Z1)
2263 .Case("z2", AArch64::Z2)
2264 .Case("z3", AArch64::Z3)
2265 .Case("z4", AArch64::Z4)
2266 .Case("z5", AArch64::Z5)
2267 .Case("z6", AArch64::Z6)
2268 .Case("z7", AArch64::Z7)
2269 .Case("z8", AArch64::Z8)
2270 .Case("z9", AArch64::Z9)
2271 .Case("z10", AArch64::Z10)
2272 .Case("z11", AArch64::Z11)
2273 .Case("z12", AArch64::Z12)
2274 .Case("z13", AArch64::Z13)
2275 .Case("z14", AArch64::Z14)
2276 .Case("z15", AArch64::Z15)
2277 .Case("z16", AArch64::Z16)
2278 .Case("z17", AArch64::Z17)
2279 .Case("z18", AArch64::Z18)
2280 .Case("z19", AArch64::Z19)
2281 .Case("z20", AArch64::Z20)
2282 .Case("z21", AArch64::Z21)
2283 .Case("z22", AArch64::Z22)
2284 .Case("z23", AArch64::Z23)
2285 .Case("z24", AArch64::Z24)
2286 .Case("z25", AArch64::Z25)
2287 .Case("z26", AArch64::Z26)
2288 .Case("z27", AArch64::Z27)
2289 .Case("z28", AArch64::Z28)
2290 .Case("z29", AArch64::Z29)
2291 .Case("z30", AArch64::Z30)
2292 .Case("z31", AArch64::Z31)
2293 .Default(0);
2294}
2295
2296static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2297 return StringSwitch<unsigned>(Name.lower())
2298 .Case("p0", AArch64::P0)
2299 .Case("p1", AArch64::P1)
2300 .Case("p2", AArch64::P2)
2301 .Case("p3", AArch64::P3)
2302 .Case("p4", AArch64::P4)
2303 .Case("p5", AArch64::P5)
2304 .Case("p6", AArch64::P6)
2305 .Case("p7", AArch64::P7)
2306 .Case("p8", AArch64::P8)
2307 .Case("p9", AArch64::P9)
2308 .Case("p10", AArch64::P10)
2309 .Case("p11", AArch64::P11)
2310 .Case("p12", AArch64::P12)
2311 .Case("p13", AArch64::P13)
2312 .Case("p14", AArch64::P14)
2313 .Case("p15", AArch64::P15)
2314 .Default(0);
2315}
2316
2317bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2318 SMLoc &EndLoc) {
2319 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
25
Calling 'AArch64AsmParser::tryParseRegister'
31
Returning from 'AArch64AsmParser::tryParseRegister'
32
Returning without writing to 'RegNo'
2320}
2321
2322OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2323 SMLoc &StartLoc,
2324 SMLoc &EndLoc) {
2325 StartLoc = getLoc();
2326 auto Res = tryParseScalarRegister(RegNo);
26
Calling 'AArch64AsmParser::tryParseScalarRegister'
29
Returning from 'AArch64AsmParser::tryParseScalarRegister'
2327 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2328 return Res;
30
Returning without writing to 'RegNo'
2329}
2330
2331// Matches a register name or register alias previously defined by '.req'
2332unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2333 RegKind Kind) {
2334 unsigned RegNum = 0;
2335 if ((RegNum = matchSVEDataVectorRegName(Name)))
2336 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2337
2338 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2339 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2340
2341 if ((RegNum = MatchNeonVectorRegName(Name)))
2342 return Kind == RegKind::NeonVector ? RegNum : 0;
2343
2344 // The parsed register must be of RegKind Scalar
2345 if ((RegNum = MatchRegisterName(Name)))
2346 return Kind == RegKind::Scalar ? RegNum : 0;
2347
2348 if (!RegNum) {
2349 // Handle a few common aliases of registers.
2350 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2351 .Case("fp", AArch64::FP)
2352 .Case("lr", AArch64::LR)
2353 .Case("x31", AArch64::XZR)
2354 .Case("w31", AArch64::WZR)
2355 .Default(0))
2356 return Kind == RegKind::Scalar ? RegNum : 0;
2357
2358 // Check for aliases registered via .req. Canonicalize to lower case.
2359 // That's more consistent since register names are case insensitive, and
2360 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2361 auto Entry = RegisterReqs.find(Name.lower());
2362 if (Entry == RegisterReqs.end())
2363 return 0;
2364
2365 // set RegNum if the match is the right kind of register
2366 if (Kind == Entry->getValue().first)
2367 RegNum = Entry->getValue().second;
2368 }
2369 return RegNum;
2370}
2371
2372/// tryParseScalarRegister - Try to parse a register name. The token must be an
2373/// Identifier when called, and if it is a register name the token is eaten and
2374/// the register is added to the operand list.
2375OperandMatchResultTy
2376AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2377 MCAsmParser &Parser = getParser();
2378 const AsmToken &Tok = Parser.getTok();
2379 if (Tok.isNot(AsmToken::Identifier))
27
Taking true branch
2380 return MatchOperand_NoMatch;
28
Returning without writing to 'RegNum'
2381
2382 std::string lowerCase = Tok.getString().lower();
2383 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2384 if (Reg == 0)
2385 return MatchOperand_NoMatch;
2386
2387 RegNum = Reg;
2388 Parser.Lex(); // Eat identifier token.
2389 return MatchOperand_Success;
2390}
2391
2392/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2393OperandMatchResultTy
2394AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2395 MCAsmParser &Parser = getParser();
2396 SMLoc S = getLoc();
2397
2398 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2399 Error(S, "Expected cN operand where 0 <= N <= 15");
2400 return MatchOperand_ParseFail;
2401 }
2402
2403 StringRef Tok = Parser.getTok().getIdentifier();
2404 if (Tok[0] != 'c' && Tok[0] != 'C') {
2405 Error(S, "Expected cN operand where 0 <= N <= 15");
2406 return MatchOperand_ParseFail;
2407 }
2408
2409 uint32_t CRNum;
2410 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2411 if (BadNum || CRNum > 15) {
2412 Error(S, "Expected cN operand where 0 <= N <= 15");
2413 return MatchOperand_ParseFail;
2414 }
2415
2416 Parser.Lex(); // Eat identifier token.
2417 Operands.push_back(
2418 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2419 return MatchOperand_Success;
2420}
2421
2422/// tryParsePrefetch - Try to parse a prefetch operand.
2423template <bool IsSVEPrefetch>
2424OperandMatchResultTy
2425AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2426 MCAsmParser &Parser = getParser();
2427 SMLoc S = getLoc();
2428 const AsmToken &Tok = Parser.getTok();
2429
2430 auto LookupByName = [](StringRef N) {
2431 if (IsSVEPrefetch) {
2432 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2433 return Optional<unsigned>(Res->Encoding);
2434 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2435 return Optional<unsigned>(Res->Encoding);
2436 return Optional<unsigned>();
2437 };
2438
2439 auto LookupByEncoding = [](unsigned E) {
2440 if (IsSVEPrefetch) {
2441 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2442 return Optional<StringRef>(Res->Name);
2443 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2444 return Optional<StringRef>(Res->Name);
2445 return Optional<StringRef>();
2446 };
2447 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2448
2449 // Either an identifier for named values or a 5-bit immediate.
2450 // Eat optional hash.
2451 if (parseOptionalToken(AsmToken::Hash) ||
2452 Tok.is(AsmToken::Integer)) {
2453 const MCExpr *ImmVal;
2454 if (getParser().parseExpression(ImmVal))
2455 return MatchOperand_ParseFail;
2456
2457 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2458 if (!MCE) {
2459 TokError("immediate value expected for prefetch operand");
2460 return MatchOperand_ParseFail;
2461 }
2462 unsigned prfop = MCE->getValue();
2463 if (prfop > MaxVal) {
2464 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2465 "] expected");
2466 return MatchOperand_ParseFail;
2467 }
2468
2469 auto PRFM = LookupByEncoding(MCE->getValue());
2470 Operands.push_back(AArch64Operand::CreatePrefetch(
2471 prfop, PRFM.getValueOr(""), S, getContext()));
2472 return MatchOperand_Success;
2473 }
2474
2475 if (Tok.isNot(AsmToken::Identifier)) {
2476 TokError("prefetch hint expected");
2477 return MatchOperand_ParseFail;
2478 }
2479
2480 auto PRFM = LookupByName(Tok.getString());
2481 if (!PRFM) {
2482 TokError("prefetch hint expected");
2483 return MatchOperand_ParseFail;
2484 }
2485
2486 Operands.push_back(AArch64Operand::CreatePrefetch(
2487 *PRFM, Tok.getString(), S, getContext()));
2488 Parser.Lex(); // Eat identifier token.
2489 return MatchOperand_Success;
2490}
2491
2492/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2493OperandMatchResultTy
2494AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2495 MCAsmParser &Parser = getParser();
2496 SMLoc S = getLoc();
2497 const AsmToken &Tok = Parser.getTok();
2498 if (Tok.isNot(AsmToken::Identifier)) {
2499 TokError("invalid operand for instruction");
2500 return MatchOperand_ParseFail;
2501 }
2502
2503 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2504 if (!PSB) {
2505 TokError("invalid operand for instruction");
2506 return MatchOperand_ParseFail;
2507 }
2508
2509 Operands.push_back(AArch64Operand::CreatePSBHint(
2510 PSB->Encoding, Tok.getString(), S, getContext()));
2511 Parser.Lex(); // Eat identifier token.
2512 return MatchOperand_Success;
2513}
2514
2515/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2516OperandMatchResultTy
2517AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2518 MCAsmParser &Parser = getParser();
2519 SMLoc S = getLoc();
2520 const AsmToken &Tok = Parser.getTok();
2521 if (Tok.isNot(AsmToken::Identifier)) {
2522 TokError("invalid operand for instruction");
2523 return MatchOperand_ParseFail;
2524 }
2525
2526 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2527 if (!BTI) {
2528 TokError("invalid operand for instruction");
2529 return MatchOperand_ParseFail;
2530 }
2531
2532 Operands.push_back(AArch64Operand::CreateBTIHint(
2533 BTI->Encoding, Tok.getString(), S, getContext()));
2534 Parser.Lex(); // Eat identifier token.
2535 return MatchOperand_Success;
2536}
2537
2538/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2539/// instruction.
2540OperandMatchResultTy
2541AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2542 MCAsmParser &Parser = getParser();
2543 SMLoc S = getLoc();
2544 const MCExpr *Expr = nullptr;
2545
2546 if (Parser.getTok().is(AsmToken::Hash)) {
2547 Parser.Lex(); // Eat hash token.
2548 }
2549
2550 if (parseSymbolicImmVal(Expr))
2551 return MatchOperand_ParseFail;
2552
2553 AArch64MCExpr::VariantKind ELFRefKind;
2554 MCSymbolRefExpr::VariantKind DarwinRefKind;
2555 int64_t Addend;
2556 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2557 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2558 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2559 // No modifier was specified at all; this is the syntax for an ELF basic
2560 // ADRP relocation (unfortunately).
2561 Expr =
2562 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2563 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2564 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2565 Addend != 0) {
2566 Error(S, "gotpage label reference not allowed an addend");
2567 return MatchOperand_ParseFail;
2568 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2569 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2570 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2571 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2572 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2573 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
2574 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2575 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2576 // The operand must be an @page or @gotpage qualified symbolref.
2577 Error(S, "page or gotpage label reference expected");
2578 return MatchOperand_ParseFail;
2579 }
2580 }
2581
2582 // We have either a label reference possibly with addend or an immediate. The
2583 // addend is a raw value here. The linker will adjust it to only reference the
2584 // page.
2585 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2586 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2587
2588 return MatchOperand_Success;
2589}
2590
2591/// tryParseAdrLabel - Parse and validate a source label for the ADR
2592/// instruction.
2593OperandMatchResultTy
2594AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2595 SMLoc S = getLoc();
2596 const MCExpr *Expr = nullptr;
2597
2598 // Leave anything with a bracket to the default for SVE
2599 if (getParser().getTok().is(AsmToken::LBrac))
2600 return MatchOperand_NoMatch;
2601
2602 if (getParser().getTok().is(AsmToken::Hash))
2603 getParser().Lex(); // Eat hash token.
2604
2605 if (parseSymbolicImmVal(Expr))
2606 return MatchOperand_ParseFail;
2607
2608 AArch64MCExpr::VariantKind ELFRefKind;
2609 MCSymbolRefExpr::VariantKind DarwinRefKind;
2610 int64_t Addend;
2611 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2612 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2613 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2614 // No modifier was specified at all; this is the syntax for an ELF basic
2615 // ADR relocation (unfortunately).
2616 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2617 } else {
2618 Error(S, "unexpected adr label");
2619 return MatchOperand_ParseFail;
2620 }
2621 }
2622
2623 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2624 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2625 return MatchOperand_Success;
2626}
2627
2628/// tryParseFPImm - A floating point immediate expression operand.
2629template<bool AddFPZeroAsLiteral>
2630OperandMatchResultTy
2631AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2632 MCAsmParser &Parser = getParser();
2633 SMLoc S = getLoc();
2634
2635 bool Hash = parseOptionalToken(AsmToken::Hash);
2636
2637 // Handle negation, as that still comes through as a separate token.
2638 bool isNegative = parseOptionalToken(AsmToken::Minus);
2639
2640 const AsmToken &Tok = Parser.getTok();
2641 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2642 if (!Hash)
2643 return MatchOperand_NoMatch;
2644 TokError("invalid floating point immediate");
2645 return MatchOperand_ParseFail;
2646 }
2647
2648 // Parse hexadecimal representation.
2649 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2650 if (Tok.getIntVal() > 255 || isNegative) {
2651 TokError("encoded floating point value out of range");
2652 return MatchOperand_ParseFail;
2653 }
2654
2655 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2656 Operands.push_back(
2657 AArch64Operand::CreateFPImm(F, true, S, getContext()));
2658 } else {
2659 // Parse FP representation.
2660 APFloat RealVal(APFloat::IEEEdouble());
2661 auto StatusOrErr =
2662 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2663 if (errorToBool(StatusOrErr.takeError())) {
2664 TokError("invalid floating point representation");
2665 return MatchOperand_ParseFail;
2666 }
2667
2668 if (isNegative)
2669 RealVal.changeSign();
2670
2671 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2672 Operands.push_back(
2673 AArch64Operand::CreateToken("#0", false, S, getContext()));
2674 Operands.push_back(
2675 AArch64Operand::CreateToken(".0", false, S, getContext()));
2676 } else
2677 Operands.push_back(AArch64Operand::CreateFPImm(
2678 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2679 }
2680
2681 Parser.Lex(); // Eat the token.
2682
2683 return MatchOperand_Success;
2684}
2685
2686/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2687/// a shift suffix, for example '#1, lsl #12'.
2688OperandMatchResultTy
2689AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2690 MCAsmParser &Parser = getParser();
2691 SMLoc S = getLoc();
2692
2693 if (Parser.getTok().is(AsmToken::Hash))
2694 Parser.Lex(); // Eat '#'
2695 else if (Parser.getTok().isNot(AsmToken::Integer))
2696 // Operand should start from # or should be integer, emit error otherwise.
2697 return MatchOperand_NoMatch;
2698
2699 const MCExpr *Imm = nullptr;
2700 if (parseSymbolicImmVal(Imm))
2701 return MatchOperand_ParseFail;
2702 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2703 SMLoc E = Parser.getTok().getLoc();
2704 Operands.push_back(
2705 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2706 return MatchOperand_Success;
2707 }
2708
2709 // Eat ','
2710 Parser.Lex();
2711
2712 // The optional operand must be "lsl #N" where N is non-negative.
2713 if (!Parser.getTok().is(AsmToken::Identifier) ||
2714 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2715 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2716 return MatchOperand_ParseFail;
2717 }
2718
2719 // Eat 'lsl'
2720 Parser.Lex();
2721
2722 parseOptionalToken(AsmToken::Hash);
2723
2724 if (Parser.getTok().isNot(AsmToken::Integer)) {
2725 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2726 return MatchOperand_ParseFail;
2727 }
2728
2729 int64_t ShiftAmount = Parser.getTok().getIntVal();
2730
2731 if (ShiftAmount < 0) {
2732 Error(Parser.getTok().getLoc(), "positive shift amount required");
2733 return MatchOperand_ParseFail;
2734 }
2735 Parser.Lex(); // Eat the number
2736
2737 // Just in case the optional lsl #0 is used for immediates other than zero.
2738 if (ShiftAmount == 0 && Imm != nullptr) {
2739 SMLoc E = Parser.getTok().getLoc();
2740 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2741 return MatchOperand_Success;
2742 }
2743
2744 SMLoc E = Parser.getTok().getLoc();
2745 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2746 S, E, getContext()));
2747 return MatchOperand_Success;
2748}
2749
2750/// parseCondCodeString - Parse a Condition Code string.
2751AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2752 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2753 .Case("eq", AArch64CC::EQ)
2754 .Case("ne", AArch64CC::NE)
2755 .Case("cs", AArch64CC::HS)
2756 .Case("hs", AArch64CC::HS)
2757 .Case("cc", AArch64CC::LO)
2758 .Case("lo", AArch64CC::LO)
2759 .Case("mi", AArch64CC::MI)
2760 .Case("pl", AArch64CC::PL)
2761 .Case("vs", AArch64CC::VS)
2762 .Case("vc", AArch64CC::VC)
2763 .Case("hi", AArch64CC::HI)
2764 .Case("ls", AArch64CC::LS)
2765 .Case("ge", AArch64CC::GE)
2766 .Case("lt", AArch64CC::LT)
2767 .Case("gt", AArch64CC::GT)
2768 .Case("le", AArch64CC::LE)
2769 .Case("al", AArch64CC::AL)
2770 .Case("nv", AArch64CC::NV)
2771 .Default(AArch64CC::Invalid);
2772
2773 if (CC == AArch64CC::Invalid &&
2774 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2775 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2776 .Case("none", AArch64CC::EQ)
2777 .Case("any", AArch64CC::NE)
2778 .Case("nlast", AArch64CC::HS)
2779 .Case("last", AArch64CC::LO)
2780 .Case("first", AArch64CC::MI)
2781 .Case("nfrst", AArch64CC::PL)
2782 .Case("pmore", AArch64CC::HI)
2783 .Case("plast", AArch64CC::LS)
2784 .Case("tcont", AArch64CC::GE)
2785 .Case("tstop", AArch64CC::LT)
2786 .Default(AArch64CC::Invalid);
2787
2788 return CC;
2789}
2790
2791/// parseCondCode - Parse a Condition Code operand.
2792bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2793 bool invertCondCode) {
2794 MCAsmParser &Parser = getParser();
2795 SMLoc S = getLoc();
2796 const AsmToken &Tok = Parser.getTok();
2797 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")((Tok.is(AsmToken::Identifier) && "Token is not an Identifier"
) ? static_cast<void> (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2797, __PRETTY_FUNCTION__))
;
2798
2799 StringRef Cond = Tok.getString();
2800 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2801 if (CC == AArch64CC::Invalid)
2802 return TokError("invalid condition code");
2803 Parser.Lex(); // Eat identifier token.
2804
2805 if (invertCondCode) {
2806 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2807 return TokError("condition codes AL and NV are invalid for this instruction");
2808 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2809 }
2810
2811 Operands.push_back(
2812 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2813 return false;
2814}
2815
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present. Accepts both plain shifts (lsl/lsr/asr/ror/msl), which
/// require an immediate, and extends (uxtb..sxtx), for which the immediate is
/// optional and defaults to an implicit #0.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // The shift/extend mnemonic is matched case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex(); // Eat the shift/extend mnemonic.

  // The '#' before the amount is optional.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // Plain shifts must carry an explicit amount.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2889
// Table mapping architecture-extension names to the subtarget features they
// imply. Consumed by setRequiredFeatureString (below) to print which
// extension a rejected instruction requires. An empty feature set marks
// extensions that are recognized by name but not yet supported.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2929
2930static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2931 if (FBS[AArch64::HasV8_1aOps])
2932 Str += "ARMv8.1a";
2933 else if (FBS[AArch64::HasV8_2aOps])
2934 Str += "ARMv8.2a";
2935 else if (FBS[AArch64::HasV8_3aOps])
2936 Str += "ARMv8.3a";
2937 else if (FBS[AArch64::HasV8_4aOps])
2938 Str += "ARMv8.4a";
2939 else if (FBS[AArch64::HasV8_5aOps])
2940 Str += "ARMv8.5a";
2941 else if (FBS[AArch64::HasV8_6aOps])
2942 Str += "ARMv8.6a";
2943 else if (FBS[AArch64::HasV8_7aOps])
2944 Str += "ARMv8.7a";
2945 else {
2946 SmallVector<std::string, 2> ExtMatches;
2947 for (const auto& Ext : ExtensionMap) {
2948 // Use & in case multiple features are enabled
2949 if ((FBS & Ext.Features) != FeatureBitset())
2950 ExtMatches.push_back(Ext.Name);
2951 }
2952 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
2953 }
2954}
2955
2956void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2957 SMLoc S) {
2958 const uint16_t Op2 = Encoding & 7;
2959 const uint16_t Cm = (Encoding & 0x78) >> 3;
2960 const uint16_t Cn = (Encoding & 0x780) >> 7;
2961 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2962
2963 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2964
2965 Operands.push_back(
2966 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2967 Operands.push_back(
2968 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2969 Operands.push_back(
2970 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2971 Expr = MCConstantExpr::create(Op2, getContext());
2972 Operands.push_back(
2973 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2974}
2975
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Also handles the cfp/dvp/cpp prediction-restriction aliases. Returns true
/// on error (with a diagnostic already emitted).
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // All of these aliases lower to the generic "sys" mnemonic.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Look the named operation up in the table for the current mnemonic; if it
  // exists but needs features the subtarget lacks, report which ones.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The op2 field distinguishes cfp/dvp/cpp; it is combined with the table
    // encoding (which holds op1:Cn:Cm) to form the full SYS encoding.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Operations whose name contains "all" act globally and take no register.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3073
/// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either an
/// immediate in [0, 15] or a named barrier option. For "dsb", an
/// out-of-range immediate or unknown name yields NoMatch (not an error) so
/// that the nXS variant can be tried afterwards.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Keep a copy of the integer token so it can be pushed back if the nXS
    // variant should get a chance to parse it.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the symbolic option name for this encoding, if one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3146
/// Parse the operand of a "dsb ... nXS" barrier (Armv8.7-A): either one of
/// the immediates 16/20/24/28 or a named nXS barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // The immediate was validated above, so the table lookup cannot fail.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3201
3202OperandMatchResultTy
3203AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3204 MCAsmParser &Parser = getParser();
3205 const AsmToken &Tok = Parser.getTok();
3206
3207 if (Tok.isNot(AsmToken::Identifier))
3208 return MatchOperand_NoMatch;
3209
3210 int MRSReg, MSRReg;
3211 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3212 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3213 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3214 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3215 } else
3216 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3217
3218 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3219 unsigned PStateImm = -1;
3220 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3221 PStateImm = PState->Encoding;
3222
3223 Operands.push_back(
3224 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3225 PStateImm, getContext()));
3226 Parser.Lex(); // Eat identifier
3227
3228 return MatchOperand_Success;
3229}
3230
3231/// tryParseNeonVectorRegister - Parse a vector register operand.
3232bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3233 MCAsmParser &Parser = getParser();
3234 if (Parser.getTok().isNot(AsmToken::Identifier))
3235 return true;
3236
3237 SMLoc S = getLoc();
3238 // Check for a vector register specifier first.
3239 StringRef Kind;
3240 unsigned Reg;
3241 OperandMatchResultTy Res =
3242 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3243 if (Res != MatchOperand_Success)
3244 return true;
3245
3246 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3247 if (!KindRes)
3248 return true;
3249
3250 unsigned ElementWidth = KindRes->second;
3251 Operands.push_back(
3252 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3253 S, getLoc(), getContext()));
3254
3255 // If there was an explicit qualifier, that goes on as a literal text
3256 // operand.
3257 if (!Kind.empty())
3258 Operands.push_back(
3259 AArch64Operand::CreateToken(Kind, false, S, getContext()));
3260
3261 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3262}
3263
3264OperandMatchResultTy
3265AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3266 SMLoc SIdx = getLoc();
3267 if (parseOptionalToken(AsmToken::LBrac)) {
3268 const MCExpr *ImmVal;
3269 if (getParser().parseExpression(ImmVal))
3270 return MatchOperand_NoMatch;
3271 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3272 if (!MCE) {
3273 TokError("immediate value expected for vector index");
3274 return MatchOperand_ParseFail;;
3275 }
3276
3277 SMLoc E = getLoc();
3278
3279 if (parseToken(AsmToken::RBrac, "']' expected"))
3280 return MatchOperand_ParseFail;;
3281
3282 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3283 E, getContext()));
3284 return MatchOperand_Success;
3285 }
3286
3287 return MatchOperand_NoMatch;
3288}
3289
3290// tryParseVectorRegister - Try to parse a vector register name with
3291// optional kind specifier. If it is a register specifier, eat the token
3292// and return it.
3293OperandMatchResultTy
3294AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3295 RegKind MatchKind) {
3296 MCAsmParser &Parser = getParser();
3297 const AsmToken &Tok = Parser.getTok();
3298
3299 if (Tok.isNot(AsmToken::Identifier))
3300 return MatchOperand_NoMatch;
3301
3302 StringRef Name = Tok.getString();
3303 // If there is a kind specifier, it's separated from the register name by
3304 // a '.'.
3305 size_t Start = 0, Next = Name.find('.');
3306 StringRef Head = Name.slice(Start, Next);
3307 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3308
3309 if (RegNum) {
3310 if (Next != StringRef::npos) {
3311 Kind = Name.slice(Next, StringRef::npos);
3312 if (!isValidVectorKind(Kind, MatchKind)) {
3313 TokError("invalid vector kind qualifier");
3314 return MatchOperand_ParseFail;
3315 }
3316 }
3317 Parser.Lex(); // Eat the register token.
3318
3319 Reg = RegNum;
3320 return MatchOperand_Success;
3321 }
3322
3323 return MatchOperand_NoMatch;
3324}
3325
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand, e.g.
/// "p0.b", optionally followed by a "/m" (merging) or "/z" (zeroing)
/// qualifier, which is pushed as separate "/" and "m"/"z" token operands.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3378
3379/// parseRegister - Parse a register operand.
3380bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3381 // Try for a Neon vector register.
3382 if (!tryParseNeonVectorRegister(Operands))
3383 return false;
3384
3385 // Otherwise try for a scalar register.
3386 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3387 return false;
3388
3389 return true;
3390}
3391
3392bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3393 MCAsmParser &Parser = getParser();
3394 bool HasELFModifier = false;
3395 AArch64MCExpr::VariantKind RefKind;
3396
3397 if (parseOptionalToken(AsmToken::Colon)) {
3398 HasELFModifier = true;
3399
3400 if (Parser.getTok().isNot(AsmToken::Identifier))
3401 return TokError("expect relocation specifier in operand after ':'");
3402
3403 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3404 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3405 .Case("lo12", AArch64MCExpr::VK_LO12)
3406 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3407 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3408 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3409 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3410 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3411 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3412 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3413 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3414 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3415 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3416 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3417 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3418 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3419 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3420 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3421 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3422 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3423 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3424 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3425 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3426 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3427 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3428 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3429 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3430 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3431 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3432 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3433 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3434 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3435 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3436 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3437 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3438 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3439 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3440 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3441 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3442 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3443 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3444 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3445 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3446 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3447 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3448 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3449 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3450 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3451 .Default(AArch64MCExpr::VK_INVALID);
3452
3453 if (RefKind == AArch64MCExpr::VK_INVALID)
3454 return TokError("expect relocation specifier in operand after ':'");
3455
3456 Parser.Lex(); // Eat identifier
3457
3458 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3459 return true;
3460 }
3461
3462 if (getParser().parseExpression(ImmVal))
3463 return true;
3464
3465 if (HasELFModifier)
3466 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3467
3468 return false;
3469}
3470
/// Parse a (possibly multi-register) vector list operand of the form
/// "{ vN.T, vM.T, ... }" or the range form "{ vN.T - vM.T }". \p ExpectMatch
/// controls whether a non-vector after '{' is a hard error (true) or just
/// NoMatch (false), so SVE and Neon list flavours can be tried in turn.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      // tryParseVectorRegister only succeeds with a kind it validated, so
      // re-parsing that kind here cannot fail.
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{ vN.T - vM.T }".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Register numbers wrap around at 32.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{ vN.T, vM.T, ... }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffixes must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3591
3592/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3593bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3594 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3595 if (ParseRes != MatchOperand_Success)
3596 return true;
3597
3598 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3599}
3600
/// Parse a GPR64sp operand that may be followed by an index that must be
/// zero, i.e. "Xn" or "Xn, #0" (the '#' itself being optional).
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No comma: plain register operand with no index.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // An optional '#' may precede the index.
  parseOptionalToken(AsmToken::Hash);

  if (getParser().getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  // Only a constant zero index is accepted.
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
3634
/// Parse a general-purpose register operand. When \p ParseShiftExtend is set
/// and a comma follows the register, a shift/extend suffix is parsed and
/// folded into the register operand. \p EqTy records how the register must
/// compare against a tied operand.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into a single register operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3669
3670bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3671 MCAsmParser &Parser = getParser();
3672
3673 // Some SVE instructions have a decoration after the immediate, i.e.
3674 // "mul vl". We parse them here and add tokens, which must be present in the
3675 // asm string in the tablegen instruction.
3676 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3677 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3678 if (!Parser.getTok().getString().equals_lower("mul") ||
3679 !(NextIsVL || NextIsHash))
3680 return true;
3681
3682 Operands.push_back(
3683 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3684 Parser.Lex(); // Eat the "mul"
3685
3686 if (NextIsVL) {
3687 Operands.push_back(
3688 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3689 Parser.Lex(); // Eat the "vl"
3690 return false;
3691 }
3692
3693 if (NextIsHash) {
3694 Parser.Lex(); // Eat the #
3695 SMLoc S = getLoc();
3696
3697 // Parse immediate operand.
3698 const MCExpr *ImmVal;
3699 if (!Parser.parseExpression(ImmVal))
3700 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3701 Operands.push_back(AArch64Operand::CreateImm(
3702 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3703 getContext()));
3704 return MatchOperand_Success;
3705 }
3706 }
3707
3708 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3709}
3710
3711bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
3712 MCAsmParser &Parser = getParser();
3713 auto Tok = Parser.getTok();
3714 if (Tok.isNot(AsmToken::Identifier))
3715 return true;
3716 Operands.push_back(AArch64Operand::CreateToken(Tok.getString(), false,
3717 Tok.getLoc(), getContext()));
3718 Parser.Lex();
3719 return false;
3720}
3721
/// parseOperand - Parse a arm instruction operand. For now this parses the
/// operand regardless of the mnemonic.
///
/// \param Operands       receives the parsed operand(s).
/// \param isCondCode     the caller expects a condition-code operand here.
/// \param invertCondCode invert the parsed condition code (used for the
///                       cset/cinc-style aliases).
/// \returns true on error (bool convention: true == failure).
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a symbolic immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    // Start of a memory operand, e.g. "[x0, #8]".
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    // '{' introduces a NEON vector register list.
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // NOTE: relies on the implicit enum->bool conversion —
    // MatchOperand_Success (0) becomes false, MatchOperand_ParseFail (non-0)
    // becomes true, matching this function's error convention.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, as the matcher expects.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // The "ldr r0, =val" pseudo: load a constant/label, via mov or a
    // constant-pool entry.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // NOTE(review): 'S' is never assigned on this path, so the immediates
    // below carry a default-constructed (invalid) start location — confirm
    // whether that is intentional.
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize the constant into (Imm16 << ShiftAmt) form for movz.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3898
3899bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
3900 const MCExpr *Expr = nullptr;
3901 SMLoc L = getLoc();
3902 if (check(getParser().parseExpression(Expr), L, "expected expression"))
3903 return true;
3904 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
3905 if (check(!Value, L, "expected constant expression"))
3906 return true;
3907 Out = Value->getValue();
3908 return false;
3909}
3910
3911bool AArch64AsmParser::parseComma() {
3912 if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
3913 "expected comma"))
3914 return true;
3915 // Eat the comma
3916 getParser().Lex();
3917 return false;
3918}
3919
3920bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
3921 unsigned First, unsigned Last) {
3922 unsigned Reg;
23
'Reg' declared without an initial value
3923 SMLoc Start, End;
3924 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
24
Calling 'AArch64AsmParser::ParseRegister'
33
Returning from 'AArch64AsmParser::ParseRegister'
34
Assuming the condition is false
35
Taking false branch
3925 return true;
3926
3927 // Special handling for FP and LR; they aren't linearly after x28 in
3928 // the registers enum.
3929 unsigned RangeEnd = Last;
3930 if (Base
35.1
'Base' is equal to X0
== AArch64::X0) {
36
Taking true branch
3931 if (Last
36.1
'Last' is not equal to FP
== AArch64::FP) {
37
Taking false branch
3932 RangeEnd = AArch64::X28;
3933 if (Reg == AArch64::FP) {
3934 Out = 29;
3935 return false;
3936 }
3937 }
3938 if (Last
37.1
'Last' is equal to LR
== AArch64::LR) {
38
Taking true branch
3939 RangeEnd = AArch64::X28;
3940 if (Reg == AArch64::FP) {
39
The left operand of '==' is a garbage value
3941 Out = 29;
3942 return false;
3943 } else if (Reg == AArch64::LR) {
3944 Out = 30;
3945 return false;
3946 }
3947 }
3948 }
3949
3950 if (check(Reg < First || Reg > RangeEnd, Start,
3951 Twine("expected register in range ") +
3952 AArch64InstPrinter::getRegisterName(First) + " to " +
3953 AArch64InstPrinter::getRegisterName(Last)))
3954 return true;
3955 Out = Reg - Base;
3956 return false;
3957}
3958
3959bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3960 const MCParsedAsmOperand &Op2) const {
3961 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3962 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3963 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3964 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3965 return MCTargetAsmParser::regsEqual(Op1, Op2);
3966
3967 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3968, __PRETTY_FUNCTION__))
3968 "Testing equality of non-scalar registers not supported")((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3968, __PRETTY_FUNCTION__))
;
3969
3970 // Check if a registers match their sub/super register classes.
3971 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3972 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3973 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3974 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3975 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3976 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3977 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3978 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3979
3980 return false;
3981}
3982
3983/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3984/// operands.
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Handles: legacy "beq"-style branch spellings, the ".req" directive, SYS
/// aliases (ic/dc/at/tlbi/...), mnemonic suffixes split on '.', and the
/// condition-code operand positions of the conditional-select alias family.
/// Returns true on error (bool convention: true == failure).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize one-word conditional branches ("beq") into "b.cond" form.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point the diagnostic at the suffix within the original mnemonic.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1; // 1-based operand position, drives cond-code handling.
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      // + An RBrac will end an address for load/store/prefetch
      // + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4118
4119static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4120 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31
)) ? static_cast<void> (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4120, __PRETTY_FUNCTION__))
;
4121 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4122 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4123 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4124 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4125 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4126 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4127}
4128
4129// FIXME: This entire function is a giant hack to provide us with decent
4130// operand range validation/diagnostics until TableGen/MC can be extended
4131// to support autogeneration of this kind of validation.
/// Post-match semantic validation of an assembled instruction: movprfx
/// pairing rules, unpredictable writeback/exclusive register overlaps, and
/// symbolic-immediate restrictions on add/sub. Returns true (with a
/// diagnostic) if the instruction is rejected.
///
/// \param Inst  the matched instruction.
/// \param IDLoc location of the mnemonic, for whole-instruction diagnostics.
/// \param Loc   per-operand locations, for operand-targeted diagnostics.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erronously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed intructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                          " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                           " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                             " movprfx and destination also used as non-destructive"
                             " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instructions general predicate.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback LDP: base must not overlap either destination; then fall
    // through to the shared Rt == Rt2 check below.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    LLVM_FALLTHROUGH;
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback LDP: operands 0/1 are the two destinations.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // FP/SIMD writeback LDP: only the Rt == Rt2 hazard applies.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    // Writeback STP: base must not overlap either stored register.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Writeback LDR: base must not overlap the destination.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Writeback STR: base must not overlap the stored register.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive store: status register must not overlap data or (non-SP) base.
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    // Exclusive store-pair: status register must not overlap either data
    // register or the (non-SP) base.
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    // Authenticated load with writeback: base and destination must differ.
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
                   "unpredictable LDRA instruction, writeback base"
                   " is also a destination");
    break;
  }
  }


  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
4416
4417static std::string AArch64MnemonicSpellCheck(StringRef S,
4418 const FeatureBitset &FBS,
4419 unsigned VariantID = 0);
4420
// Translate a matcher failure code (one of the Match_* enumerators) into a
// human-readable diagnostic reported at Loc.  ErrorInfo is only consulted for
// Match_InvalidTiedOperand, where it indexes the offending operand in
// Operands; Match_MnemonicFail reads Operands[0] (the mnemonic token) to
// produce a spelling suggestion.  All other cases map the code directly to a
// fixed message.  Always returns true (the Error() convention for "an error
// was emitted").
4421 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4422 uint64_t ErrorInfo,
4423 OperandVector &Operands) {
4424 switch (ErrCode) {
// A tied-register constraint was violated; report which form (32-bit,
// 64-bit, or exact match) of the destination register was required.
4425 case Match_InvalidTiedOperand: {
4426 RegConstraintEqualityTy EqTy =
4427 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4428 .getRegEqualityTy();
4429 switch (EqTy) {
4430 case RegConstraintEqualityTy::EqualsSubReg:
4431 return Error(Loc, "operand must be 64-bit form of destination register");
4432 case RegConstraintEqualityTy::EqualsSuperReg:
4433 return Error(Loc, "operand must be 32-bit form of destination register");
4434 case RegConstraintEqualityTy::EqualsReg:
4435 return Error(Loc, "operand must match destination register");
4436 }
4437 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4437)
;
4438 }
4439 case Match_MissingFeature:
4440 return Error(Loc,
4441 "instruction requires a CPU feature not currently enabled");
4442 case Match_InvalidOperand:
4443 return Error(Loc, "invalid operand for instruction");
4444 case Match_InvalidSuffix:
4445 return Error(Loc, "invalid type suffix for instruction");
4446 case Match_InvalidCondCode:
4447 return Error(Loc, "expected AArch64 condition code");
4448 case Match_AddSubRegExtendSmall:
4449 return Error(Loc,
4450 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4451 case Match_AddSubRegExtendLarge:
4452 return Error(Loc,
4453 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4454 case Match_AddSubSecondSource:
4455 return Error(Loc,
4456 "expected compatible register, symbol or integer in range [0, 4095]");
4457 case Match_LogicalSecondSource:
4458 return Error(Loc, "expected compatible register or logical immediate");
4459 case Match_InvalidMovImm32Shift:
4460 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4461 case Match_InvalidMovImm64Shift:
4462 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4463 case Match_AddSubRegShift32:
4464 return Error(Loc,
4465 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4466 case Match_AddSubRegShift64:
4467 return Error(Loc,
4468 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4469 case Match_InvalidFPImm:
4470 return Error(Loc,
4471 "expected compatible register or floating-point constant");
// Memory-index diagnostics: the suffix encodes scale and signedness of the
// immediate (e.g. 4SImm7 = multiple of 4, signed 7-bit field), and each
// message spells out the resulting byte-offset range.
4472 case Match_InvalidMemoryIndexedSImm6:
4473 return Error(Loc, "index must be an integer in range [-32, 31].");
4474 case Match_InvalidMemoryIndexedSImm5:
4475 return Error(Loc, "index must be an integer in range [-16, 15].");
4476 case Match_InvalidMemoryIndexed1SImm4:
4477 return Error(Loc, "index must be an integer in range [-8, 7].");
4478 case Match_InvalidMemoryIndexed2SImm4:
4479 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4480 case Match_InvalidMemoryIndexed3SImm4:
4481 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4482 case Match_InvalidMemoryIndexed4SImm4:
4483 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4484 case Match_InvalidMemoryIndexed16SImm4:
4485 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4486 case Match_InvalidMemoryIndexed32SImm4:
4487 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4488 case Match_InvalidMemoryIndexed1SImm6:
4489 return Error(Loc, "index must be an integer in range [-32, 31].");
4490 case Match_InvalidMemoryIndexedSImm8:
4491 return Error(Loc, "index must be an integer in range [-128, 127].");
4492 case Match_InvalidMemoryIndexedSImm9:
4493 return Error(Loc, "index must be an integer in range [-256, 255].");
4494 case Match_InvalidMemoryIndexed16SImm9:
4495 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4496 case Match_InvalidMemoryIndexed8SImm10:
4497 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4498 case Match_InvalidMemoryIndexed4SImm7:
4499 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4500 case Match_InvalidMemoryIndexed8SImm7:
4501 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4502 case Match_InvalidMemoryIndexed16SImm7:
4503 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4504 case Match_InvalidMemoryIndexed8UImm5:
4505 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4506 case Match_InvalidMemoryIndexed4UImm5:
4507 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4508 case Match_InvalidMemoryIndexed2UImm5:
4509 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4510 case Match_InvalidMemoryIndexed8UImm6:
4511 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4512 case Match_InvalidMemoryIndexed16UImm6:
4513 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4514 case Match_InvalidMemoryIndexed4UImm6:
4515 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4516 case Match_InvalidMemoryIndexed2UImm6:
4517 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4518 case Match_InvalidMemoryIndexed1UImm6:
4519 return Error(Loc, "index must be in range [0, 63].");
// Extend/shift diagnostics for W/X-register memory operands; the numeric
// suffix is the access size in bits, which fixes the allowed shift amount.
4520 case Match_InvalidMemoryWExtend8:
4521 return Error(Loc,
4522 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4523 case Match_InvalidMemoryWExtend16:
4524 return Error(Loc,
4525 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4526 case Match_InvalidMemoryWExtend32:
4527 return Error(Loc,
4528 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4529 case Match_InvalidMemoryWExtend64:
4530 return Error(Loc,
4531 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4532 case Match_InvalidMemoryWExtend128:
4533 return Error(Loc,
4534 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4535 case Match_InvalidMemoryXExtend8:
4536 return Error(Loc,
4537 "expected 'lsl' or 'sxtx' with optional shift of #0");
4538 case Match_InvalidMemoryXExtend16:
4539 return Error(Loc,
4540 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4541 case Match_InvalidMemoryXExtend32:
4542 return Error(Loc,
4543 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4544 case Match_InvalidMemoryXExtend64:
4545 return Error(Loc,
4546 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4547 case Match_InvalidMemoryXExtend128:
4548 return Error(Loc,
4549 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4550 case Match_InvalidMemoryIndexed1:
4551 return Error(Loc, "index must be an integer in range [0, 4095].");
4552 case Match_InvalidMemoryIndexed2:
4553 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4554 case Match_InvalidMemoryIndexed4:
4555 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4556 case Match_InvalidMemoryIndexed8:
4557 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4558 case Match_InvalidMemoryIndexed16:
4559 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Plain immediate-range diagnostics (ImmA_B = inclusive range [A, B]).
4560 case Match_InvalidImm0_1:
4561 return Error(Loc, "immediate must be an integer in range [0, 1].");
4562 case Match_InvalidImm0_7:
4563 return Error(Loc, "immediate must be an integer in range [0, 7].");
4564 case Match_InvalidImm0_15:
4565 return Error(Loc, "immediate must be an integer in range [0, 15].");
4566 case Match_InvalidImm0_31:
4567 return Error(Loc, "immediate must be an integer in range [0, 31].");
4568 case Match_InvalidImm0_63:
4569 return Error(Loc, "immediate must be an integer in range [0, 63].");
4570 case Match_InvalidImm0_127:
4571 return Error(Loc, "immediate must be an integer in range [0, 127].");
4572 case Match_InvalidImm0_255:
4573 return Error(Loc, "immediate must be an integer in range [0, 255].");
4574 case Match_InvalidImm0_65535:
4575 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4576 case Match_InvalidImm1_8:
4577 return Error(Loc, "immediate must be an integer in range [1, 8].");
4578 case Match_InvalidImm1_16:
4579 return Error(Loc, "immediate must be an integer in range [1, 16].");
4580 case Match_InvalidImm1_32:
4581 return Error(Loc, "immediate must be an integer in range [1, 32].");
4582 case Match_InvalidImm1_64:
4583 return Error(Loc, "immediate must be an integer in range [1, 64].");
// SVE add/sub and cpy/dup immediates: an 8-bit payload with an optional
// left-shift by 8, hence the "or a multiple of 256" wording.
4584 case Match_InvalidSVEAddSubImm8:
4585 return Error(Loc, "immediate must be an integer in range [0, 255]"
4586 " with a shift amount of 0");
4587 case Match_InvalidSVEAddSubImm16:
4588 case Match_InvalidSVEAddSubImm32:
4589 case Match_InvalidSVEAddSubImm64:
4590 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4591 "multiple of 256 in range [256, 65280]");
4592 case Match_InvalidSVECpyImm8:
4593 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4594 " with a shift amount of 0");
4595 case Match_InvalidSVECpyImm16:
4596 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4597 "multiple of 256 in range [-32768, 65280]");
4598 case Match_InvalidSVECpyImm32:
4599 case Match_InvalidSVECpyImm64:
4600 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4601 "multiple of 256 in range [-32768, 32512]");
// Vector-lane index diagnostics.
4602 case Match_InvalidIndexRange1_1:
4603 return Error(Loc, "expected lane specifier '[1]'");
4604 case Match_InvalidIndexRange0_15:
4605 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4606 case Match_InvalidIndexRange0_7:
4607 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4608 case Match_InvalidIndexRange0_3:
4609 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4610 case Match_InvalidIndexRange0_1:
4611 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4612 case Match_InvalidSVEIndexRange0_63:
4613 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4614 case Match_InvalidSVEIndexRange0_31:
4615 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4616 case Match_InvalidSVEIndexRange0_15:
4617 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4618 case Match_InvalidSVEIndexRange0_7:
4619 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4620 case Match_InvalidSVEIndexRange0_3:
4621 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4622 case Match_InvalidLabel:
4623 return Error(Loc, "expected label or encodable integer pc offset");
4624 case Match_MRS:
4625 return Error(Loc, "expected readable system register");
4626 case Match_MSR:
4627 return Error(Loc, "expected writable system register or pstate");
4628 case Match_InvalidComplexRotationEven:
4629 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4630 case Match_InvalidComplexRotationOdd:
4631 return Error(Loc, "complex rotation must be 90 or 270.");
// Unknown mnemonic: run the spell checker over the mnemonic token
// (Operands[0]) against the currently available features, and append any
// "did you mean ..." suggestion it produces to the diagnostic.
4632 case Match_MnemonicFail: {
4633 std::string Suggestion = AArch64MnemonicSpellCheck(
4634 ((AArch64Operand &)*Operands[0]).getToken(),
4635 ComputeAvailableFeatures(STI->getFeatureBits()));
4636 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4637 }
4638 case Match_InvalidGPR64shifted8:
4639 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4640 case Match_InvalidGPR64shifted16:
4641 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4642 case Match_InvalidGPR64shifted32:
4643 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4644 case Match_InvalidGPR64shifted64:
4645 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4646 case Match_InvalidGPR64NoXZRshifted8:
4647 return Error(Loc, "register must be x0..x30 without shift");
4648 case Match_InvalidGPR64NoXZRshifted16:
4649 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4650 case Match_InvalidGPR64NoXZRshifted32:
4651 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4652 case Match_InvalidGPR64NoXZRshifted64:
4653 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
// SVE scatter/gather addressing-mode diagnostics: scaled/unscaled
// (uxtw|sxtw|lsl) extends on 32- and 64-bit ZPR element types.
4654 case Match_InvalidZPR32UXTW8:
4655 case Match_InvalidZPR32SXTW8:
4656 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4657 case Match_InvalidZPR32UXTW16:
4658 case Match_InvalidZPR32SXTW16:
4659 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4660 case Match_InvalidZPR32UXTW32:
4661 case Match_InvalidZPR32SXTW32:
4662 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4663 case Match_InvalidZPR32UXTW64:
4664 case Match_InvalidZPR32SXTW64:
4665 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4666 case Match_InvalidZPR64UXTW8:
4667 case Match_InvalidZPR64SXTW8:
4668 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4669 case Match_InvalidZPR64UXTW16:
4670 case Match_InvalidZPR64SXTW16:
4671 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4672 case Match_InvalidZPR64UXTW32:
4673 case Match_InvalidZPR64SXTW32:
4674 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4675 case Match_InvalidZPR64UXTW64:
4676 case Match_InvalidZPR64SXTW64:
4677 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4678 case Match_InvalidZPR32LSL8:
4679 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4680 case Match_InvalidZPR32LSL16:
4681 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4682 case Match_InvalidZPR32LSL32:
4683 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4684 case Match_InvalidZPR32LSL64:
4685 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4686 case Match_InvalidZPR64LSL8:
4687 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4688 case Match_InvalidZPR64LSL16:
4689 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4690 case Match_InvalidZPR64LSL32:
4691 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4692 case Match_InvalidZPR64LSL64:
4693 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4694 case Match_InvalidZPR0:
4695 return Error(Loc, "expected register without element width suffix");
4696 case Match_InvalidZPR8:
4697 case Match_InvalidZPR16:
4698 case Match_InvalidZPR32:
4699 case Match_InvalidZPR64:
4700 case Match_InvalidZPR128:
4701 return Error(Loc, "invalid element width");
4702 case Match_InvalidZPR_3b8:
4703 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4704 case Match_InvalidZPR_3b16:
4705 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4706 case Match_InvalidZPR_3b32:
4707 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4708 case Match_InvalidZPR_4b16:
4709 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4710 case Match_InvalidZPR_4b32:
4711 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4712 case Match_InvalidZPR_4b64:
4713 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4714 case Match_InvalidSVEPattern:
4715 return Error(Loc, "invalid predicate pattern");
4716 case Match_InvalidSVEPredicateAnyReg:
4717 case Match_InvalidSVEPredicateBReg:
4718 case Match_InvalidSVEPredicateHReg:
4719 case Match_InvalidSVEPredicateSReg:
4720 case Match_InvalidSVEPredicateDReg:
4721 return Error(Loc, "invalid predicate register.")
4722 case Match_InvalidSVEPredicate3bAnyReg:
4723 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
4724 case Match_InvalidSVEPredicate3bBReg:
4725 return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
4726 case Match_InvalidSVEPredicate3bHReg:
4727 return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
4728 case Match_InvalidSVEPredicate3bSReg:
4729 return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
4730 case Match_InvalidSVEPredicate3bDReg:
4731 return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
4732 case Match_InvalidSVEExactFPImmOperandHalfOne:
4733 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4734 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4735 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4736 case Match_InvalidSVEExactFPImmOperandZeroOne:
4737 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
// Any Match_* code without a message above is a programming error: new
// matcher diagnostics must be given a case here.
4738 default:
4739 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4739)
;
4740 }
4741}
4742
4743static const char *getSubtargetFeatureName(uint64_t Val);
4744
4745bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4746 OperandVector &Operands,
4747 MCStreamer &Out,
4748 uint64_t &ErrorInfo,
4749 bool MatchingInlineAsm) {
4750 assert(!Operands.empty() && "Unexpect empty operand list!")((!Operands.empty() && "Unexpect empty operand list!"
) ? static_cast<void> (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4750, __PRETTY_FUNCTION__))
;
4751 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4752 assert(Op.isToken() && "Leading operand should always be a mnemonic!")((Op.isToken() && "Leading operand should always be a mnemonic!"
) ? static_cast<void> (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4752, __PRETTY_FUNCTION__))
;
4753
4754 StringRef Tok = Op.getToken();
4755 unsigned NumOperands = Operands.size();
4756
4757 if (NumOperands == 4 && Tok == "lsl") {
4758 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4759 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4760 if (Op2.isScalarReg() && Op3.isImm()) {
4761 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4762 if (Op3CE) {
4763 uint64_t Op3Val = Op3CE->getValue();
4764 uint64_t NewOp3Val = 0;
4765 uint64_t NewOp4Val = 0;
4766 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4767 Op2.getReg())) {
4768 NewOp3Val = (32 - Op3Val) & 0x1f;
4769 NewOp4Val = 31 - Op3Val;
4770 } else {
4771 NewOp3Val = (64 - Op3Val) & 0x3f;
4772 NewOp4Val = 63 - Op3Val;
4773 }
4774
4775 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4776 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4777
4778 Operands[0] = AArch64Operand::CreateToken(
4779 "ubfm", false, Op.getStartLoc(), getContext());
4780 Operands.push_back(AArch64Operand::CreateImm(
4781 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4782 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4783 Op3.getEndLoc(), getContext());
4784 }
4785 }
4786 } else if (NumOperands == 4 && Tok == "bfc") {
4787 // FIXME: Horrible hack to handle BFC->BFM alias.
4788 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4789 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4790 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4791
4792 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4793 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4794 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4795
4796 if (LSBCE && WidthCE) {
4797 uint64_t LSB = LSBCE->getValue();
4798 uint64_t Width = WidthCE->getValue();
4799
4800 uint64_t RegWidth = 0;
4801 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4802 Op1.getReg()))
4803 RegWidth = 64;
4804 else
4805 RegWidth = 32;
4806
4807 if (LSB >= RegWidth)
4808 return Error(LSBOp.getStartLoc(),
4809 "expected integer in range [0, 31]");
4810 if (Width < 1 || Width > RegWidth)
4811 return Error(WidthOp.getStartLoc(),
4812 "expected integer in range [1, 32]");
4813
4814 uint64_t ImmR = 0;
4815 if (RegWidth == 32)
4816 ImmR = (32 - LSB) & 0x1f;
4817 else
4818 ImmR = (64 - LSB) & 0x3f;
4819
4820 uint64_t ImmS = Width - 1;
4821
4822 if (ImmR != 0 && ImmS >= ImmR)
4823 return Error(WidthOp.getStartLoc(),
4824 "requested insert overflows register");
4825
4826 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4827 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4828 Operands[0] = AArch64Operand::CreateToken(
4829 "bfm", false, Op.getStartLoc(), getContext());
4830 Operands[2] = AArch64Operand::CreateReg(
4831 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4832 SMLoc(), SMLoc(), getContext());
4833 Operands[3] = AArch64Operand::CreateImm(
4834 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4835 Operands.emplace_back(
4836 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4837 WidthOp.getEndLoc(), getContext()));
4838 }
4839 }
4840 } else if (NumOperands == 5) {
4841 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4842 // UBFIZ -> UBFM aliases.
4843 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4844 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4845 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4846 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4847
4848 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4849 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4850 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4851
4852 if (Op3CE && Op4CE) {
4853 uint64_t Op3Val = Op3CE->getValue();
4854 uint64_t Op4Val = Op4CE->getValue();
4855
4856 uint64_t RegWidth = 0;
4857 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4858 Op1.getReg()))
4859 RegWidth = 64;
4860 else
4861 RegWidth = 32;
4862
4863 if (Op3Val >= RegWidth)
4864 return Error(Op3.getStartLoc(),
4865 "expected integer in range [0, 31]");
4866 if (Op4Val < 1 || Op4Val > RegWidth)
4867 return Error(Op4.getStartLoc(),
4868 "expected integer in range [1, 32]");
4869
4870 uint64_t NewOp3Val = 0;
4871 if (RegWidth == 32)
4872 NewOp3Val = (32 - Op3Val) & 0x1f;
4873 else
4874 NewOp3Val = (64 - Op3Val) & 0x3f;
4875
4876 uint64_t NewOp4Val = Op4Val - 1;
4877
4878 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4879 return Error(Op4.getStartLoc(),
4880 "requested insert overflows register");
4881
4882 const MCExpr *NewOp3 =
4883 MCConstantExpr::create(NewOp3Val, getContext());
4884 const MCExpr *NewOp4 =
4885 MCConstantExpr::create(NewOp4Val, getContext());
4886 Operands[3] = AArch64Operand::CreateImm(
4887 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4888 Operands[4] = AArch64Operand::CreateImm(
4889 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4890 if (Tok == "bfi")
4891 Operands[0] = AArch64Operand::CreateToken(
4892 "bfm", false, Op.getStartLoc(), getContext());
4893 else if (Tok == "sbfiz")
4894 Operands[0] = AArch64Operand::CreateToken(
4895 "sbfm", false, Op.getStartLoc(), getContext());
4896 else if (Tok == "ubfiz")
4897 Operands[0] = AArch64Operand::CreateToken(
4898 "ubfm", false, Op.getStartLoc(), getContext());
4899 else
4900 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4900)
;
4901 }
4902 }
4903
4904 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4905 // UBFX -> UBFM aliases.
4906 } else if (NumOperands == 5 &&
4907 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4908 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4909 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4910 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4911
4912 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4913 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4914 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4915
4916 if (Op3CE && Op4CE) {
4917 uint64_t Op3Val = Op3CE->getValue();
4918 uint64_t Op4Val = Op4CE->getValue();
4919
4920 uint64_t RegWidth = 0;
4921 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4922 Op1.getReg()))
4923 RegWidth = 64;
4924 else
4925 RegWidth = 32;
4926
4927 if (Op3Val >= RegWidth)
4928 return Error(Op3.getStartLoc(),
4929 "expected integer in range [0, 31]");
4930 if (Op4Val < 1 || Op4Val > RegWidth)
4931 return Error(Op4.getStartLoc(),
4932 "expected integer in range [1, 32]");
4933
4934 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4935
4936 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4937 return Error(Op4.getStartLoc(),
4938 "requested extract overflows register");
4939
4940 const MCExpr *NewOp4 =
4941 MCConstantExpr::create(NewOp4Val, getContext());
4942 Operands[4] = AArch64Operand::CreateImm(
4943 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4944 if (Tok == "bfxil")
4945 Operands[0] = AArch64Operand::CreateToken(
4946 "bfm", false, Op.getStartLoc(), getContext());
4947 else if (Tok == "sbfx")
4948 Operands[0] = AArch64Operand::CreateToken(
4949 "sbfm", false, Op.getStartLoc(), getContext());
4950 else if (Tok == "ubfx")
4951 Operands[0] = AArch64Operand::CreateToken(
4952 "ubfm", false, Op.getStartLoc(), getContext());
4953 else
4954 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4954)
;
4955 }
4956 }
4957 }
4958 }
4959
4960 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4961 // instruction for FP registers correctly in some rare circumstances. Convert
4962 // it to a safe instruction and warn (because silently changing someone's
4963 // assembly is rude).
4964 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4965 NumOperands == 4 && Tok == "movi") {
4966 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4967 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4968 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4969 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4970 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4971 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4972 if (Suffix.lower() == ".2d" &&
4973 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4974 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4975 " correctly on this CPU, converting to equivalent movi.16b");
4976 // Switch the suffix to .16b.
4977 unsigned Idx = Op1.isToken() ? 1 : 2;
4978 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4979 getContext());
4980 }
4981 }
4982 }
4983
4984 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4985 // InstAlias can't quite handle this since the reg classes aren't
4986 // subclasses.
4987 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4988 // The source register can be Wn here, but the matcher expects a
4989 // GPR64. Twiddle it here if necessary.
4990 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4991 if (Op.isScalarReg()) {
4992 unsigned Reg = getXRegFromWReg(Op.getReg());
4993 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4994 Op.getStartLoc(), Op.getEndLoc(),
4995 getContext());
4996 }
4997 }
4998 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4999 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5000 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5001 if (Op.isScalarReg() &&
5002 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5003 Op.getReg())) {
5004 // The source register can be Wn here, but the matcher expects a
5005 // GPR64. Twiddle it here if necessary.
5006 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5007 if (Op.isScalarReg()) {
5008 unsigned Reg = getXRegFromWReg(Op.getReg());
5009 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5010 Op.getStartLoc(),
5011 Op.getEndLoc(), getContext());
5012 }
5013 }
5014 }
5015 // FIXME: Likewise for uxt[bh] with a Xd dst operand
5016 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5017 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5018 if (Op.isScalarReg() &&
5019 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5020 Op.getReg())) {
5021 // The source register can be Wn here, but the matcher expects a
5022 // GPR32. Twiddle it here if necessary.
5023 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5024 if (Op.isScalarReg()) {
5025 unsigned Reg = getWRegFromXReg(Op.getReg());
5026 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5027 Op.getStartLoc(),
5028 Op.getEndLoc(), getContext());
5029 }
5030 }
5031 }
5032
5033 MCInst Inst;
5034 FeatureBitset MissingFeatures;
5035 // First try to match against the secondary set of tables containing the
5036 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5037 unsigned MatchResult =
5038 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5039 MatchingInlineAsm, 1);
5040
5041 // If that fails, try against the alternate table containing long-form NEON:
5042 // "fadd v0.2s, v1.2s, v2.2s"
5043 if (MatchResult != Match_Success) {
5044 // But first, save the short-form match result: we can use it in case the
5045 // long-form match also fails.
5046 auto ShortFormNEONErrorInfo = ErrorInfo;
5047 auto ShortFormNEONMatchResult = MatchResult;
5048 auto ShortFormNEONMissingFeatures = MissingFeatures;
5049
5050 MatchResult =
5051 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5052 MatchingInlineAsm, 0);
5053
5054 // Now, both matches failed, and the long-form match failed on the mnemonic
5055 // suffix token operand. The short-form match failure is probably more
5056 // relevant: use it instead.
5057 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5058 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5059 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5060 MatchResult = ShortFormNEONMatchResult;
5061 ErrorInfo = ShortFormNEONErrorInfo;
5062 MissingFeatures = ShortFormNEONMissingFeatures;
5063 }
5064 }
5065
5066 switch (MatchResult) {
5067 case Match_Success: {
5068 // Perform range checking and other semantic validations
5069 SmallVector<SMLoc, 8> OperandLocs;
5070 NumOperands = Operands.size();
5071 for (unsigned i = 1; i < NumOperands; ++i)
5072 OperandLocs.push_back(Operands[i]->getStartLoc());
5073 if (validateInstruction(Inst, IDLoc, OperandLocs))
5074 return true;
5075
5076 Inst.setLoc(IDLoc);
5077 Out.emitInstruction(Inst, getSTI());
5078 return false;
5079 }
5080 case Match_MissingFeature: {
5081 assert(MissingFeatures.any() && "Unknown missing feature!")((MissingFeatures.any() && "Unknown missing feature!"
) ? static_cast<void> (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5081, __PRETTY_FUNCTION__))
;
5082 // Special case the error message for the very common case where only
5083 // a single subtarget feature is missing (neon, e.g.).
5084 std::string Msg = "instruction requires:";
5085 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5086 if (MissingFeatures[i]) {
5087 Msg += " ";
5088 Msg += getSubtargetFeatureName(i);
5089 }
5090 }
5091 return Error(IDLoc, Msg);
5092 }
5093 case Match_MnemonicFail:
5094 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5095 case Match_InvalidOperand: {
5096 SMLoc ErrorLoc = IDLoc;
5097
5098 if (ErrorInfo != ~0ULL) {
5099 if (ErrorInfo >= Operands.size())
5100 return Error(IDLoc, "too few operands for instruction",
5101 SMRange(IDLoc, getTok().getLoc()));
5102
5103 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5104 if (ErrorLoc == SMLoc())
5105 ErrorLoc = IDLoc;
5106 }
5107 // If the match failed on a suffix token operand, tweak the diagnostic
5108 // accordingly.
5109 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5110 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5111 MatchResult = Match_InvalidSuffix;
5112
5113 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5114 }
5115 case Match_InvalidTiedOperand:
5116 case Match_InvalidMemoryIndexed1:
5117 case Match_InvalidMemoryIndexed2:
5118 case Match_InvalidMemoryIndexed4:
5119 case Match_InvalidMemoryIndexed8:
5120 case Match_InvalidMemoryIndexed16:
5121 case Match_InvalidCondCode:
5122 case Match_AddSubRegExtendSmall:
5123 case Match_AddSubRegExtendLarge:
5124 case Match_AddSubSecondSource:
5125 case Match_LogicalSecondSource:
5126 case Match_AddSubRegShift32:
5127 case Match_AddSubRegShift64:
5128 case Match_InvalidMovImm32Shift:
5129 case Match_InvalidMovImm64Shift:
5130 case Match_InvalidFPImm:
5131 case Match_InvalidMemoryWExtend8:
5132 case Match_InvalidMemoryWExtend16:
5133 case Match_InvalidMemoryWExtend32:
5134 case Match_InvalidMemoryWExtend64:
5135 case Match_InvalidMemoryWExtend128:
5136 case Match_InvalidMemoryXExtend8:
5137 case Match_InvalidMemoryXExtend16:
5138 case Match_InvalidMemoryXExtend32:
5139 case Match_InvalidMemoryXExtend64:
5140 case Match_InvalidMemoryXExtend128:
5141 case Match_InvalidMemoryIndexed1SImm4:
5142 case Match_InvalidMemoryIndexed2SImm4:
5143 case Match_InvalidMemoryIndexed3SImm4:
5144 case Match_InvalidMemoryIndexed4SImm4:
5145 case Match_InvalidMemoryIndexed1SImm6:
5146 case Match_InvalidMemoryIndexed16SImm4:
5147 case Match_InvalidMemoryIndexed32SImm4:
5148 case Match_InvalidMemoryIndexed4SImm7:
5149 case Match_InvalidMemoryIndexed8SImm7:
5150 case Match_InvalidMemoryIndexed16SImm7:
5151 case Match_InvalidMemoryIndexed8UImm5:
5152 case Match_InvalidMemoryIndexed4UImm5:
5153 case Match_InvalidMemoryIndexed2UImm5:
5154 case Match_InvalidMemoryIndexed1UImm6:
5155 case Match_InvalidMemoryIndexed2UImm6:
5156 case Match_InvalidMemoryIndexed4UImm6:
5157 case Match_InvalidMemoryIndexed8UImm6:
5158 case Match_InvalidMemoryIndexed16UImm6:
5159 case Match_InvalidMemoryIndexedSImm6:
5160 case Match_InvalidMemoryIndexedSImm5:
5161 case Match_InvalidMemoryIndexedSImm8:
5162 case Match_InvalidMemoryIndexedSImm9:
5163 case Match_InvalidMemoryIndexed16SImm9:
5164 case Match_InvalidMemoryIndexed8SImm10:
5165 case Match_InvalidImm0_1:
5166 case Match_InvalidImm0_7:
5167 case Match_InvalidImm0_15:
5168 case Match_InvalidImm0_31:
5169 case Match_InvalidImm0_63:
5170 case Match_InvalidImm0_127:
5171 case Match_InvalidImm0_255:
5172 case Match_InvalidImm0_65535:
5173 case Match_InvalidImm1_8:
5174 case Match_InvalidImm1_16:
5175 case Match_InvalidImm1_32:
5176 case Match_InvalidImm1_64:
5177 case Match_InvalidSVEAddSubImm8:
5178 case Match_InvalidSVEAddSubImm16:
5179 case Match_InvalidSVEAddSubImm32:
5180 case Match_InvalidSVEAddSubImm64:
5181 case Match_InvalidSVECpyImm8:
5182 case Match_InvalidSVECpyImm16:
5183 case Match_InvalidSVECpyImm32:
5184 case Match_InvalidSVECpyImm64:
5185 case Match_InvalidIndexRange1_1:
5186 case Match_InvalidIndexRange0_15:
5187 case Match_InvalidIndexRange0_7:
5188 case Match_InvalidIndexRange0_3:
5189 case Match_InvalidIndexRange0_1:
5190 case Match_InvalidSVEIndexRange0_63:
5191 case Match_InvalidSVEIndexRange0_31:
5192 case Match_InvalidSVEIndexRange0_15:
5193 case Match_InvalidSVEIndexRange0_7:
5194 case Match_InvalidSVEIndexRange0_3:
5195 case Match_InvalidLabel:
5196 case Match_InvalidComplexRotationEven:
5197 case Match_InvalidComplexRotationOdd:
5198 case Match_InvalidGPR64shifted8:
5199 case Match_InvalidGPR64shifted16:
5200 case Match_InvalidGPR64shifted32:
5201 case Match_InvalidGPR64shifted64:
5202 case Match_InvalidGPR64NoXZRshifted8:
5203 case Match_InvalidGPR64NoXZRshifted16:
5204 case Match_InvalidGPR64NoXZRshifted32:
5205 case Match_InvalidGPR64NoXZRshifted64:
5206 case Match_InvalidZPR32UXTW8:
5207 case Match_InvalidZPR32UXTW16:
5208 case Match_InvalidZPR32UXTW32:
5209 case Match_InvalidZPR32UXTW64:
5210 case Match_InvalidZPR32SXTW8:
5211 case Match_InvalidZPR32SXTW16:
5212 case Match_InvalidZPR32SXTW32:
5213 case Match_InvalidZPR32SXTW64:
5214 case Match_InvalidZPR64UXTW8:
5215 case Match_InvalidZPR64SXTW8:
5216 case Match_InvalidZPR64UXTW16:
5217 case Match_InvalidZPR64SXTW16:
5218 case Match_InvalidZPR64UXTW32:
5219 case Match_InvalidZPR64SXTW32:
5220 case Match_InvalidZPR64UXTW64:
5221 case Match_InvalidZPR64SXTW64:
5222 case Match_InvalidZPR32LSL8:
5223 case Match_InvalidZPR32LSL16:
5224 case Match_InvalidZPR32LSL32:
5225 case Match_InvalidZPR32LSL64:
5226 case Match_InvalidZPR64LSL8:
5227 case Match_InvalidZPR64LSL16:
5228 case Match_InvalidZPR64LSL32:
5229 case Match_InvalidZPR64LSL64:
5230 case Match_InvalidZPR0:
5231 case Match_InvalidZPR8:
5232 case Match_InvalidZPR16:
5233 case Match_InvalidZPR32:
5234 case Match_InvalidZPR64:
5235 case Match_InvalidZPR128:
5236 case Match_InvalidZPR_3b8:
5237 case Match_InvalidZPR_3b16:
5238 case Match_InvalidZPR_3b32:
5239 case Match_InvalidZPR_4b16:
5240 case Match_InvalidZPR_4b32:
5241 case Match_InvalidZPR_4b64:
5242 case Match_InvalidSVEPredicateAnyReg:
5243 case Match_InvalidSVEPattern:
5244 case Match_InvalidSVEPredicateBReg:
5245 case Match_InvalidSVEPredicateHReg:
5246 case Match_InvalidSVEPredicateSReg:
5247 case Match_InvalidSVEPredicateDReg:
5248 case Match_InvalidSVEPredicate3bAnyReg:
5249 case Match_InvalidSVEPredicate3bBReg:
5250 case Match_InvalidSVEPredicate3bHReg:
5251 case Match_InvalidSVEPredicate3bSReg:
5252 case Match_InvalidSVEPredicate3bDReg:
5253 case Match_InvalidSVEExactFPImmOperandHalfOne:
5254 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5255 case Match_InvalidSVEExactFPImmOperandZeroOne:
5256 case Match_MSR:
5257 case Match_MRS: {
5258 if (ErrorInfo >= Operands.size())
5259 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5260 // Any time we get here, there's nothing fancy to do. Just get the
5261 // operand SMLoc and display the diagnostic.
5262 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5263 if (ErrorLoc == SMLoc())
5264 ErrorLoc = IDLoc;
5265 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5266 }
5267 }
5268
5269 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5269)
;
5270}
5271
5272/// ParseDirective parses the arm specific directives
5273bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5274 const MCObjectFileInfo::Environment Format =
5275 getContext().getObjectFileInfo()->getObjectFileType();
5276 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
1
Assuming 'Format' is not equal to IsMachO
5277 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
2
Assuming 'Format' is equal to IsCOFF
5278
5279 auto IDVal = DirectiveID.getIdentifier().lower();
5280 SMLoc Loc = DirectiveID.getLoc();
5281 if (IDVal == ".arch")
3
Taking false branch
5282 parseDirectiveArch(Loc);
5283 else if (IDVal == ".cpu")
4
Taking false branch
5284 parseDirectiveCPU(Loc);
5285 else if (IDVal == ".tlsdesccall")
5
Taking false branch
5286 parseDirectiveTLSDescCall(Loc);
5287 else if (IDVal == ".ltorg" || IDVal == ".pool")
6
Taking false branch
5288 parseDirectiveLtorg(Loc);
5289 else if (IDVal == ".unreq")
7
Taking false branch
5290 parseDirectiveUnreq(Loc);
5291 else if (IDVal == ".inst")
8
Taking false branch
5292 parseDirectiveInst(Loc);
5293 else if (IDVal == ".cfi_negate_ra_state")
9
Taking false branch
5294 parseDirectiveCFINegateRAState();
5295 else if (IDVal == ".cfi_b_key_frame")
10
Taking false branch
5296 parseDirectiveCFIBKeyFrame();
5297 else if (IDVal == ".arch_extension")
11
Taking false branch
5298 parseDirectiveArchExtension(Loc);
5299 else if (IDVal == ".variant_pcs")
12
Taking false branch
5300 parseDirectiveVariantPCS(Loc);
5301 else if (IsMachO
12.1
'IsMachO' is false
) {
13
Taking false branch
5302 if (IDVal == MCLOHDirectiveName())
5303 parseDirectiveLOH(IDVal, Loc);
5304 else
5305 return true;
5306 } else if (IsCOFF
13.1
'IsCOFF' is true
) {
14
Taking true branch
5307 if (IDVal == ".seh_stackalloc")
15
Taking false branch
5308 parseDirectiveSEHAllocStack(Loc);
5309 else if (IDVal == ".seh_endprologue")
16
Taking false branch
5310 parseDirectiveSEHPrologEnd(Loc);
5311 else if (IDVal == ".seh_save_r19r20_x")
17
Taking false branch
5312 parseDirectiveSEHSaveR19R20X(Loc);
5313 else if (IDVal == ".seh_save_fplr")
18
Taking false branch
5314 parseDirectiveSEHSaveFPLR(Loc);
5315 else if (IDVal == ".seh_save_fplr_x")
19
Taking false branch
5316 parseDirectiveSEHSaveFPLRX(Loc);
5317 else if (IDVal == ".seh_save_reg")
20
Taking true branch
5318 parseDirectiveSEHSaveReg(Loc);
21
Calling 'AArch64AsmParser::parseDirectiveSEHSaveReg'
5319 else if (IDVal == ".seh_save_reg_x")
5320 parseDirectiveSEHSaveRegX(Loc);
5321 else if (IDVal == ".seh_save_regp")
5322 parseDirectiveSEHSaveRegP(Loc);
5323 else if (IDVal == ".seh_save_regp_x")
5324 parseDirectiveSEHSaveRegPX(Loc);
5325 else if (IDVal == ".seh_save_lrpair")
5326 parseDirectiveSEHSaveLRPair(Loc);
5327 else if (IDVal == ".seh_save_freg")
5328 parseDirectiveSEHSaveFReg(Loc);
5329 else if (IDVal == ".seh_save_freg_x")
5330 parseDirectiveSEHSaveFRegX(Loc);
5331 else if (IDVal == ".seh_save_fregp")
5332 parseDirectiveSEHSaveFRegP(Loc);
5333 else if (IDVal == ".seh_save_fregp_x")
5334 parseDirectiveSEHSaveFRegPX(Loc);
5335 else if (IDVal == ".seh_set_fp")
5336 parseDirectiveSEHSetFP(Loc);
5337 else if (IDVal == ".seh_add_fp")
5338 parseDirectiveSEHAddFP(Loc);
5339 else if (IDVal == ".seh_nop")
5340 parseDirectiveSEHNop(Loc);
5341 else if (IDVal == ".seh_save_next")
5342 parseDirectiveSEHSaveNext(Loc);
5343 else if (IDVal == ".seh_startepilogue")
5344 parseDirectiveSEHEpilogStart(Loc);
5345 else if (IDVal == ".seh_endepilogue")
5346 parseDirectiveSEHEpilogEnd(Loc);
5347 else if (IDVal == ".seh_trap_frame")
5348 parseDirectiveSEHTrapFrame(Loc);
5349 else if (IDVal == ".seh_pushframe")
5350 parseDirectiveSEHMachineFrame(Loc);
5351 else if (IDVal == ".seh_context")
5352 parseDirectiveSEHContext(Loc);
5353 else if (IDVal == ".seh_clear_unwound_to_call")
5354 parseDirectiveSEHClearUnwoundToCall(Loc);
5355 else
5356 return true;
5357 } else
5358 return true;
5359 return false;
5360}
5361
5362static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5363 SmallVector<StringRef, 4> &RequestedExtensions) {
5364 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
5365 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
5366
5367 if (!NoCrypto && Crypto) {
5368 switch (ArchKind) {
5369 default:
5370 // Map 'generic' (and others) to sha2 and aes, because
5371 // that was the traditional meaning of crypto.
5372 case AArch64::ArchKind::ARMV8_1A:
5373 case AArch64::ArchKind::ARMV8_2A:
5374 case AArch64::ArchKind::ARMV8_3A:
5375 RequestedExtensions.push_back("sha2");
5376 RequestedExtensions.push_back("aes");
5377 break;
5378 case AArch64::ArchKind::ARMV8_4A:
5379 case AArch64::ArchKind::ARMV8_5A:
5380 case AArch64::ArchKind::ARMV8_6A:
5381 case AArch64::ArchKind::ARMV8_7A:
5382 case AArch64::ArchKind::ARMV8R:
5383 RequestedExtensions.push_back("sm4");
5384 RequestedExtensions.push_back("sha3");
5385 RequestedExtensions.push_back("sha2");
5386 RequestedExtensions.push_back("aes");
5387 break;
5388 }
5389 } else if (NoCrypto) {
5390 switch (ArchKind) {
5391 default:
5392 // Map 'generic' (and others) to sha2 and aes, because
5393 // that was the traditional meaning of crypto.
5394 case AArch64::ArchKind::ARMV8_1A:
5395 case AArch64::ArchKind::ARMV8_2A:
5396 case AArch64::ArchKind::ARMV8_3A:
5397 RequestedExtensions.push_back("nosha2");
5398 RequestedExtensions.push_back("noaes");
5399 break;
5400 case AArch64::ArchKind::ARMV8_4A:
5401 case AArch64::ArchKind::ARMV8_5A:
5402 case AArch64::ArchKind::ARMV8_6A:
5403 case AArch64::ArchKind::ARMV8_7A:
5404 RequestedExtensions.push_back("nosm4");
5405 RequestedExtensions.push_back("nosha3");
5406 RequestedExtensions.push_back("nosha2");
5407 RequestedExtensions.push_back("noaes");
5408 break;
5409 }
5410 }
5411}
5412
5413/// parseDirectiveArch
5414/// ::= .arch token
5415bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5416 SMLoc ArchLoc = getLoc();
5417
5418 StringRef Arch, ExtensionString;
5419 std::tie(Arch, ExtensionString) =
5420 getParser().parseStringToEndOfStatement().trim().split('+');
5421
5422 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5423 if (ID == AArch64::ArchKind::INVALID)
5424 return Error(ArchLoc, "unknown arch name");
5425
5426 if (parseToken(AsmToken::EndOfStatement))
5427 return true;
5428
5429 // Get the architecture and extension features.
5430 std::vector<StringRef> AArch64Features;
5431 AArch64::getArchFeatures(ID, AArch64Features);
5432 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5433 AArch64Features);
5434
5435 MCSubtargetInfo &STI = copySTI();
5436 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5437 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
5438 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5439
5440 SmallVector<StringRef, 4> RequestedExtensions;
5441 if (!ExtensionString.empty())
5442 ExtensionString.split(RequestedExtensions, '+');
5443
5444 ExpandCryptoAEK(ID, RequestedExtensions);
5445
5446 FeatureBitset Features = STI.getFeatureBits();
5447 for (auto Name : RequestedExtensions) {
5448 bool EnableFeature = true;
5449
5450 if (Name.startswith_lower("no")) {
5451 EnableFeature = false;
5452 Name = Name.substr(2);
5453 }
5454
5455 for (const auto &Extension : ExtensionMap) {
5456 if (Extension.Name != Name)
5457 continue;
5458
5459 if (Extension.Features.none())
5460 report_fatal_error("unsupported architectural extension: " + Name);
5461
5462 FeatureBitset ToggleFeatures = EnableFeature
5463 ? (~Features & Extension.Features)
5464 : ( Features & Extension.Features);
5465 FeatureBitset Features =
5466 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5467 setAvailableFeatures(Features);
5468 break;
5469 }
5470 }
5471 return false;
5472}
5473
5474/// parseDirectiveArchExtension
5475/// ::= .arch_extension [no]feature
5476bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5477 SMLoc ExtLoc = getLoc();
5478
5479 StringRef Name = getParser().parseStringToEndOfStatement().trim();
5480
5481 if (parseToken(AsmToken::EndOfStatement,
5482 "unexpected token in '.arch_extension' directive"))
5483 return true;
5484
5485 bool EnableFeature = true;
5486 if (Name.startswith_lower("no")) {
5487 EnableFeature = false;
5488 Name = Name.substr(2);
5489 }
5490
5491 MCSubtargetInfo &STI = copySTI();
5492 FeatureBitset Features = STI.getFeatureBits();
5493 for (const auto &Extension : ExtensionMap) {
5494 if (Extension.Name != Name)
5495 continue;
5496
5497 if (Extension.Features.none())
5498 return Error(ExtLoc, "unsupported architectural extension: " + Name);
5499
5500 FeatureBitset ToggleFeatures = EnableFeature
5501 ? (~Features & Extension.Features)
5502 : (Features & Extension.Features);
5503 FeatureBitset Features =
5504 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5505 setAvailableFeatures(Features);
5506 return false;
5507 }
5508
5509 return Error(ExtLoc, "unknown architectural extension: " + Name);
5510}
5511
5512static SMLoc incrementLoc(SMLoc L, int Offset) {
5513 return SMLoc::getFromPointer(L.getPointer() + Offset);
5514}
5515
5516/// parseDirectiveCPU
5517/// ::= .cpu id
5518bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5519 SMLoc CurLoc = getLoc();
5520
5521 StringRef CPU, ExtensionString;
5522 std::tie(CPU, ExtensionString) =
5523 getParser().parseStringToEndOfStatement().trim().split('+');
5524
5525 if (parseToken(AsmToken::EndOfStatement))
5526 return true;
5527
5528 SmallVector<StringRef, 4> RequestedExtensions;
5529 if (!ExtensionString.empty())
5530 ExtensionString.split(RequestedExtensions, '+');
5531
5532 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5533 // once that is tablegen'ed
5534 if (!getSTI().isCPUStringValid(CPU)) {
5535 Error(CurLoc, "unknown CPU name");
5536 return false;
5537 }
5538
5539 MCSubtargetInfo &STI = copySTI();
5540 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
5541 CurLoc = incrementLoc(CurLoc, CPU.size());
5542
5543 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5544
5545 FeatureBitset Features = STI.getFeatureBits();
5546 for (auto Name : RequestedExtensions) {
5547 // Advance source location past '+'.
5548 CurLoc = incrementLoc(CurLoc, 1);
5549
5550 bool EnableFeature = true;
5551
5552 if (Name.startswith_lower("no")) {
5553 EnableFeature = false;
5554 Name = Name.substr(2);
5555 }
5556
5557 bool FoundExtension = false;
5558 for (const auto &Extension : ExtensionMap) {
5559 if (Extension.Name != Name)
5560 continue;
5561
5562 if (Extension.Features.none())
5563 report_fatal_error("unsupported architectural extension: " + Name);
5564
5565 FeatureBitset ToggleFeatures = EnableFeature
5566 ? (~Features & Extension.Features)
5567 : ( Features & Extension.Features);
5568 FeatureBitset Features =
5569 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5570 setAvailableFeatures(Features);
5571 FoundExtension = true;
5572
5573 break;
5574 }
5575
5576 if (!FoundExtension)
5577 Error(CurLoc, "unsupported architectural extension");
5578
5579 CurLoc = incrementLoc(CurLoc, Name.size());
5580 }
5581 return false;
5582}
5583
5584/// parseDirectiveInst
5585/// ::= .inst opcode [, ...]
5586bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5587 if (getLexer().is(AsmToken::EndOfStatement))
5588 return Error(Loc, "expected expression following '.inst' directive");
5589
5590 auto parseOp = [&]() -> bool {
5591 SMLoc L = getLoc();
5592 const MCExpr *Expr = nullptr;
5593 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5594 return true;
5595 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5596 if (check(!Value, L, "expected constant expression"))
5597 return true;
5598 getTargetStreamer().emitInst(Value->getValue());
5599 return false;
5600 };
5601
5602 if (parseMany(parseOp))
5603 return addErrorSuffix(" in '.inst' directive");
5604 return false;
5605}
5606
5607// parseDirectiveTLSDescCall:
5608// ::= .tlsdesccall symbol
5609bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5610 StringRef Name;
5611 if (check(getParser().parseIdentifier(Name), L,
5612 "expected symbol after directive") ||
5613 parseToken(AsmToken::EndOfStatement))
5614 return true;
5615
5616 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5617 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5618 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5619
5620 MCInst Inst;
5621 Inst.setOpcode(AArch64::TLSDESCCALL);
5622 Inst.addOperand(MCOperand::createExpr(Expr));
5623
5624 getParser().getStreamer().emitInstruction(Inst, getSTI());
5625 return false;
5626}
5627
5628 /// ::= .loh <lohName | lohId> label1, ..., labelN
5629 /// The number of arguments depends on the loh identifier.
// Parses a MachO linker-optimization-hint directive: the LOH kind is given
// either as a recognized identifier or as a raw number, followed by exactly
// the number of label arguments that kind requires. Emits the hint via the
// streamer; returns true on any parse error.
5630 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5631 MCLOHType Kind;
5632 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5633 if (getParser().getTok().isNot(AsmToken::Integer))
5634 return TokError("expected an identifier or a number in directive");
5635 // We successfully get a numeric value for the identifier.
5636 // Check if it is valid.
5637 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): '-1U' promotes to int64_t 4294967295 here, so this guard
// reads as "validate only when Id fits in 32 bits" — Ids above UINT32_MAX
// would reach the cast unvalidated. Presumably intended as a range check
// before the MCLOHType cast; confirm the intent.
5638 if (Id <= -1U && !isValidMCLOHType(Id))
5639 return TokError("invalid numeric identifier in directive")
5640 Kind = (MCLOHType)Id;
5641 } else {
5642 StringRef Name = getTok().getIdentifier();
5643 // We successfully parse an identifier.
5644 // Check if it is a recognized one.
5645 int Id = MCLOHNameToId(Name);
5646
5647 if (Id == -1)
5648 return TokError("invalid identifier in directive");
5649 Kind = (MCLOHType)Id;
5650 }
5651 // Consume the identifier.
5652 Lex();
5653 // Get the number of arguments of this LOH.
5654 int NbArgs = MCLOHIdToNbArgs(Kind);
5655
5656 assert(NbArgs != -1 && "Invalid number of arguments")((NbArgs != -1 && "Invalid number of arguments") ? static_cast
<void> (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5656, __PRETTY_FUNCTION__))
;
5657
// Collect exactly NbArgs comma-separated label identifiers.
5658 SmallVector<MCSymbol *, 3> Args;
5659 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5660 StringRef Name;
5661 if (getParser().parseIdentifier(Name))
5662 return TokError("expected identifier in directive");
5663 Args.push_back(getContext().getOrCreateSymbol(Name));
5664
// No comma after the final argument.
5665 if (Idx + 1 == NbArgs)
5666 break;
5667 if (parseToken(AsmToken::Comma,
5668 "unexpected token in '" + Twine(IDVal) + "' directive"))
5669 return true;
5670 }
5671 if (parseToken(AsmToken::EndOfStatement,
5672 "unexpected token in '" + Twine(IDVal) + "' directive"))
5673 return true;
5674
5675 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
5676 return false;
5677 }
5678
5679/// parseDirectiveLtorg
5680/// ::= .ltorg | .pool
5681bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5682 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5683 return true;
5684 getTargetStreamer().emitCurrentConstantPool();
5685 return false;
5686}
5687
5688/// parseDirectiveReq
5689/// ::= name .req registername
5690bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5691 MCAsmParser &Parser = getParser();
5692 Parser.Lex(); // Eat the '.req' token.
5693 SMLoc SRegLoc = getLoc();
5694 RegKind RegisterKind = RegKind::Scalar;
5695 unsigned RegNum;
5696 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5697
5698 if (ParseRes != MatchOperand_Success) {
5699 StringRef Kind;
5700 RegisterKind = RegKind::NeonVector;
5701 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5702
5703 if (ParseRes == MatchOperand_ParseFail)
5704 return true;
5705
5706 if (ParseRes == MatchOperand_Success && !Kind.empty())
5707 return Error(SRegLoc, "vector register without type specifier expected");
5708 }
5709
5710 if (ParseRes != MatchOperand_Success) {
5711 StringRef Kind;
5712 RegisterKind = RegKind::SVEDataVector;
5713 ParseRes =
5714 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5715
5716 if (ParseRes == MatchOperand_ParseFail)
5717 return true;
5718
5719 if (ParseRes == MatchOperand_Success && !Kind.empty())
5720 return Error(SRegLoc,
5721 "sve vector register without type specifier expected");
5722 }
5723
5724 if (ParseRes != MatchOperand_Success) {
5725 StringRef Kind;
5726 RegisterKind = RegKind::SVEPredicateVector;
5727 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5728
5729 if (ParseRes == MatchOperand_ParseFail)
5730 return true;
5731
5732 if (ParseRes == MatchOperand_Success && !Kind.empty())
5733 return Error(SRegLoc,
5734 "sve predicate register without type specifier expected");
5735 }
5736
5737 if (ParseRes != MatchOperand_Success)
5738 return Error(SRegLoc, "register name or alias expected");
5739
5740 // Shouldn't be anything else.
5741 if (parseToken(AsmToken::EndOfStatement,
5742 "unexpected input in .req directive"))
5743 return true;
5744
5745 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5746 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5747 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5748
5749 return false;
5750}
5751
5752/// parseDirectiveUneq
5753/// ::= .unreq registername
5754bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5755 MCAsmParser &Parser = getParser();
5756 if (getTok().isNot(AsmToken::Identifier))
5757 return TokError("unexpected input in .unreq directive.");
5758 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());