Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 3784, column 9
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/include -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/.. 
-U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-11-29-190409-37574-1 -x c++ /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64InstPrinter.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "TargetInfo/AArch64TargetInfo.h"
15#include "AArch64InstrInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringExtras.h"
23#include "llvm/ADT/StringMap.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/MC/MCContext.h"
28#include "llvm/MC/MCExpr.h"
29#include "llvm/MC/MCInst.h"
30#include "llvm/MC/MCLinkerOptimizationHint.h"
31#include "llvm/MC/MCObjectFileInfo.h"
32#include "llvm/MC/MCParser/MCAsmLexer.h"
33#include "llvm/MC/MCParser/MCAsmParser.h"
34#include "llvm/MC/MCParser/MCAsmParserExtension.h"
35#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
36#include "llvm/MC/MCParser/MCTargetAsmParser.h"
37#include "llvm/MC/MCRegisterInfo.h"
38#include "llvm/MC/MCStreamer.h"
39#include "llvm/MC/MCSubtargetInfo.h"
40#include "llvm/MC/MCSymbol.h"
41#include "llvm/MC/MCTargetOptions.h"
42#include "llvm/MC/SubtargetFeature.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/Compiler.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/MathExtras.h"
48#include "llvm/Support/SMLoc.h"
49#include "llvm/Support/TargetParser.h"
50#include "llvm/Support/TargetRegistry.h"
51#include "llvm/Support/raw_ostream.h"
52#include <cassert>
53#include <cctype>
54#include <cstdint>
55#include <cstdio>
56#include <string>
57#include <tuple>
58#include <utility>
59#include <vector>
60
61using namespace llvm;
62
63namespace {
64
// The class of register named by a parsed register operand.
enum class RegKind {
  Scalar,             // scalar (non-vector) register
  NeonVector,         // NEON vector register
  SVEDataVector,      // SVE data vector register (z-register)
  SVEPredicateVector  // SVE predicate register (p-register)
};
71
// How a parsed register is allowed to relate to the register class the
// matcher expects: the exact register, or its super-/sub-register
// (e.g. for GPR64as32 / GPR32as64 operands — see RegOp::EqualityTy).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
77
/// Target-specific assembly parser for AArch64.  Parses instructions and
/// target directives, and tracks SVE MOVPRFX prefix state so a prefix and
/// the instruction that follows it can be checked together.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  // Records the destination (and, for predicated forms, the governing
  // predicate and element size) of a MOVPRFX instruction.  Stored in
  // NextPrefix; presumably consulted when validating the instruction that
  // follows the prefix.
  class PrefixInfo {
  public:
    /// Build a PrefixInfo from an already-encoded instruction.  Any opcode
    /// other than the MOVPRFX variants yields an inactive prefix.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated movprfx: only the destination matters.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Merging predicated form: predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing predicated form: predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    // Element size and governing predicate are only meaningful (and only
    // initialized) for the predicated movprfx forms.
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    // NOTE(review): MCStreamer::getTargetStreamer() may return null and is
    // dereferenced unconditionally here.  The constructor below installs an
    // AArch64TargetStreamer when none is present, but the static analyzer
    // still reports a possible null dereference through this path —
    // confirm every caller reaches this via a fully-constructed parser.
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  // Location of the current lexer token, for diagnostics.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  // Target-specific directive handlers.
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  // Windows SEH unwind directives.
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;

/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced from the generated matcher.
  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  // True when targeting the ILP32 ABI (selected via -target-abi ilp32).
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = Options.getABIName() == "ilp32";
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists; the streamer takes ownership
    // (see getTargetStreamer(), which dereferences it unconditionally).
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
306
307/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
308/// instruction.
309class AArch64Operand : public MCParsedAsmOperand {
private:
  // Discriminator for the union below: which flavor of operand this is.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range of the operand, for diagnostics.
  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct ExtendOp {
    unsigned Val;
  };

  // Storage for the operand payload; Kind selects the live member.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  // Construct an operand of kind K; the selected union member and the
  // source locations are filled in separately by the Create* helpers.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor: only the union member selected by Kind is copied,
  // since only that member is live.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
517
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Text of a k_Token operand.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  // Whether the token is actually a suffix on the mnemonic.
  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }
532
  // Immediate / shifted-immediate / condition-code accessors.  Each
  // asserts that its union member is the live one (selected by Kind).
  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }
552
  // Reconstruct the FP immediate as an IEEE double from the stored
  // 64-bit pattern (FPImm.Val holds an APFloat bitcasted to uint64_t).
  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  // Whether the parsed FP literal was representable exactly.
  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  // Barrier operand: numeric value (not the enum — not all values have
  // names) and the spelled name, if any.
  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }
572
  // Register and vector-list accessors; each asserts that its union
  // member is the live one (selected by Kind).
  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  // How strictly this register must match the expected register class
  // (exact, super-register, or sub-register).
  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  // First register of a vector list.
  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  // Number of registers in a vector list.
  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }
597
  // System-register, system-CR, prefetch, and PSB/BTI hint accessors;
  // each asserts that its union member is the live one (selected by Kind).
  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }
637
638 AArch64_AM::ShiftExtendType getShiftExtendType() const {
639 if (Kind == k_ShiftExtend)
640 return ShiftExtend.Type;
641 if (Kind == k_Register)
642 return Reg.ShiftExtend.Type;
643 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 643)
;
644 }
645
646 unsigned getShiftExtendAmount() const {
647 if (Kind == k_ShiftExtend)
648 return ShiftExtend.Amount;
649 if (Kind == k_Register)
650 return Reg.ShiftExtend.Amount;
651 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 651)
;
652 }
653
654 bool hasShiftExtendAmount() const {
655 if (Kind == k_ShiftExtend)
656 return ShiftExtend.HasExplicitAmount;
657 if (Kind == k_Register)
658 return Reg.ShiftExtend.HasExplicitAmount;
659 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 659)
;
660 }
661
662 bool isImm() const override { return Kind == k_Immediate; }
663 bool isMem() const override { return false; }
664
665 bool isUImm6() const {
666 if (!isImm())
667 return false;
668 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
669 if (!MCE)
670 return false;
671 int64_t Val = MCE->getValue();
672 return (Val >= 0 && Val < 64);
673 }
674
675 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
676
677 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
678 return isImmScaled<Bits, Scale>(true);
679 }
680
681 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
682 return isImmScaled<Bits, Scale>(false);
683 }
684
685 template <int Bits, int Scale>
686 DiagnosticPredicate isImmScaled(bool Signed) const {
687 if (!isImm())
688 return DiagnosticPredicateTy::NoMatch;
689
690 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
691 if (!MCE)
692 return DiagnosticPredicateTy::NoMatch;
693
694 int64_t MinVal, MaxVal;
695 if (Signed) {
696 int64_t Shift = Bits - 1;
697 MinVal = (int64_t(1) << Shift) * -Scale;
698 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
699 } else {
700 MinVal = 0;
701 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
702 }
703
704 int64_t Val = MCE->getValue();
705 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
706 return DiagnosticPredicateTy::Match;
707
708 return DiagnosticPredicateTy::NearMatch;
709 }
710
711 DiagnosticPredicate isSVEPattern() const {
712 if (!isImm())
713 return DiagnosticPredicateTy::NoMatch;
714 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
715 if (!MCE)
716 return DiagnosticPredicateTy::NoMatch;
717 int64_t Val = MCE->getValue();
718 if (Val >= 0 && Val < 32)
719 return DiagnosticPredicateTy::Match;
720 return DiagnosticPredicateTy::NearMatch;
721 }
722
723 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
724 AArch64MCExpr::VariantKind ELFRefKind;
725 MCSymbolRefExpr::VariantKind DarwinRefKind;
726 int64_t Addend;
727 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
728 Addend)) {
729 // If we don't understand the expression, assume the best and
730 // let the fixup and relocation code deal with it.
731 return true;
732 }
733
734 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
735 ELFRefKind == AArch64MCExpr::VK_LO12 ||
736 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
737 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
738 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
739 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
740 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
741 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
742 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
743 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
744 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
745 // Note that we don't range-check the addend. It's adjusted modulo page
746 // size when converted, so there is no "out of range" condition when using
747 // @pageoff.
748 return true;
749 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
750 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
751 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
752 return Addend == 0;
753 }
754
755 return false;
756 }
757
758 template <int Scale> bool isUImm12Offset() const {
759 if (!isImm())
760 return false;
761
762 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
763 if (!MCE)
764 return isSymbolicUImm12Offset(getImm());
765
766 int64_t Val = MCE->getValue();
767 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
768 }
769
770 template <int N, int M>
771 bool isImmInRange() const {
772 if (!isImm())
773 return false;
774 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
775 if (!MCE)
776 return false;
777 int64_t Val = MCE->getValue();
778 return (Val >= N && Val <= M);
779 }
780
781 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
782 // a logical immediate can always be represented when inverted.
783 template <typename T>
784 bool isLogicalImm() const {
785 if (!isImm())
786 return false;
787 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
788 if (!MCE)
789 return false;
790
791 int64_t Val = MCE->getValue();
792 // Avoid left shift by 64 directly.
793 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
794 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
795 if ((Val & Upper) && (Val & Upper) != Upper)
796 return false;
797
798 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
799 }
800
801 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
802
803 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
804 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
805 /// immediate that can be shifted by 'Shift'.
806 template <unsigned Width>
807 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
808 if (isShiftedImm() && Width == getShiftedImmShift())
809 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
810 return std::make_pair(CE->getValue(), Width);
811
812 if (isImm())
813 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
814 int64_t Val = CE->getValue();
815 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
816 return std::make_pair(Val >> Width, Width);
817 else
818 return std::make_pair(Val, 0u);
819 }
820
821 return {};
822 }
823
824 bool isAddSubImm() const {
825 if (!isShiftedImm() && !isImm())
826 return false;
827
828 const MCExpr *Expr;
829
830 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
831 if (isShiftedImm()) {
832 unsigned Shift = ShiftedImm.ShiftAmount;
833 Expr = ShiftedImm.Val;
834 if (Shift != 0 && Shift != 12)
835 return false;
836 } else {
837 Expr = getImm();
838 }
839
840 AArch64MCExpr::VariantKind ELFRefKind;
841 MCSymbolRefExpr::VariantKind DarwinRefKind;
842 int64_t Addend;
843 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
844 DarwinRefKind, Addend)) {
845 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
846 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
847 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
848 || ELFRefKind == AArch64MCExpr::VK_LO12
849 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
850 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
851 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
852 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
853 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
854 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
855 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
856 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
857 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
858 }
859
860 // If it's a constant, it should be a real immediate in range.
861 if (auto ShiftedVal = getShiftedVal<12>())
862 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
863
864 // If it's an expression, we hope for the best and let the fixup/relocation
865 // code deal with it.
866 return true;
867 }
868
869 bool isAddSubImmNeg() const {
870 if (!isShiftedImm() && !isImm())
871 return false;
872
873 // Otherwise it should be a real negative immediate in range.
874 if (auto ShiftedVal = getShiftedVal<12>())
875 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
876
877 return false;
878 }
879
880 // Signed value in the range -128 to +127. For element widths of
881 // 16 bits or higher it may also be a signed multiple of 256 in the
882 // range -32768 to +32512.
883 // For element-width of 8 bits a range of -128 to 255 is accepted,
884 // since a copy of a byte can be either signed/unsigned.
885 template <typename T>
886 DiagnosticPredicate isSVECpyImm() const {
887 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
888 return DiagnosticPredicateTy::NoMatch;
889
890 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
891 std::is_same<int8_t, T>::value;
892 if (auto ShiftedImm = getShiftedVal<8>())
893 if (!(IsByte && ShiftedImm->second) &&
894 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
895 << ShiftedImm->second))
896 return DiagnosticPredicateTy::Match;
897
898 return DiagnosticPredicateTy::NearMatch;
899 }
900
901 // Unsigned value in the range 0 to 255. For element widths of
902 // 16 bits or higher it may also be a signed multiple of 256 in the
903 // range 0 to 65280.
904 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
905 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
906 return DiagnosticPredicateTy::NoMatch;
907
908 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
909 std::is_same<int8_t, T>::value;
910 if (auto ShiftedImm = getShiftedVal<8>())
911 if (!(IsByte && ShiftedImm->second) &&
912 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
913 << ShiftedImm->second))
914 return DiagnosticPredicateTy::Match;
915
916 return DiagnosticPredicateTy::NearMatch;
917 }
918
919 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
920 if (isLogicalImm<T>() && !isSVECpyImm<T>())
921 return DiagnosticPredicateTy::Match;
922 return DiagnosticPredicateTy::NoMatch;
923 }
924
925 bool isCondCode() const { return Kind == k_CondCode; }
926
927 bool isSIMDImmType10() const {
928 if (!isImm())
929 return false;
930 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
931 if (!MCE)
932 return false;
933 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
934 }
935
936 template<int N>
937 bool isBranchTarget() const {
938 if (!isImm())
939 return false;
940 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
941 if (!MCE)
942 return true;
943 int64_t Val = MCE->getValue();
944 if (Val & 0x3)
945 return false;
946 assert(N > 0 && "Branch target immediate cannot be 0 bits!")((N > 0 && "Branch target immediate cannot be 0 bits!"
) ? static_cast<void> (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 946, __PRETTY_FUNCTION__))
;
947 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
948 }
949
950 bool
951 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
952 if (!isImm())
953 return false;
954
955 AArch64MCExpr::VariantKind ELFRefKind;
956 MCSymbolRefExpr::VariantKind DarwinRefKind;
957 int64_t Addend;
958 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
959 DarwinRefKind, Addend)) {
960 return false;
961 }
962 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
963 return false;
964
965 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
966 if (ELFRefKind == AllowedModifiers[i])
967 return true;
968 }
969
970 return false;
971 }
972
973 bool isMovWSymbolG3() const {
974 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
975 }
976
977 bool isMovWSymbolG2() const {
978 return isMovWSymbol(
979 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
980 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
981 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
982 AArch64MCExpr::VK_DTPREL_G2});
983 }
984
985 bool isMovWSymbolG1() const {
986 return isMovWSymbol(
987 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
988 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
989 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
990 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
991 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
992 }
993
994 bool isMovWSymbolG0() const {
995 return isMovWSymbol(
996 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
997 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
998 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
999 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1000 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1001 }
1002
1003 template<int RegWidth, int Shift>
1004 bool isMOVZMovAlias() const {
1005 if (!isImm()) return false;
1006
1007 const MCExpr *E = getImm();
1008 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1009 uint64_t Value = CE->getValue();
1010
1011 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1012 }
1013 // Only supports the case of Shift being 0 if an expression is used as an
1014 // operand
1015 return !Shift && E;
1016 }
1017
1018 template<int RegWidth, int Shift>
1019 bool isMOVNMovAlias() const {
1020 if (!isImm()) return false;
1021
1022 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1023 if (!CE) return false;
1024 uint64_t Value = CE->getValue();
1025
1026 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1027 }
1028
1029 bool isFPImm() const {
1030 return Kind == k_FPImm &&
1031 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1032 }
1033
1034 bool isBarrier() const { return Kind == k_Barrier; }
1035 bool isSysReg() const { return Kind == k_SysReg; }
1036
1037 bool isMRSSystemRegister() const {
1038 if (!isSysReg()) return false;
1039
1040 return SysReg.MRSReg != -1U;
1041 }
1042
1043 bool isMSRSystemRegister() const {
1044 if (!isSysReg()) return false;
1045 return SysReg.MSRReg != -1U;
1046 }
1047
1048 bool isSystemPStateFieldWithImm0_1() const {
1049 if (!isSysReg()) return false;
1050 return (SysReg.PStateField == AArch64PState::PAN ||
1051 SysReg.PStateField == AArch64PState::DIT ||
1052 SysReg.PStateField == AArch64PState::UAO ||
1053 SysReg.PStateField == AArch64PState::SSBS);
1054 }
1055
1056 bool isSystemPStateFieldWithImm0_15() const {
1057 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1058 return SysReg.PStateField != -1U;
1059 }
1060
1061 bool isReg() const override {
1062 return Kind == k_Register;
1063 }
1064
1065 bool isScalarReg() const {
1066 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1067 }
1068
1069 bool isNeonVectorReg() const {
1070 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1071 }
1072
1073 bool isNeonVectorRegLo() const {
1074 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1075 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1076 Reg.RegNum) ||
1077 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1078 Reg.RegNum));
1079 }
1080
1081 template <unsigned Class> bool isSVEVectorReg() const {
1082 RegKind RK;
1083 switch (Class) {
1084 case AArch64::ZPRRegClassID:
1085 case AArch64::ZPR_3bRegClassID:
1086 case AArch64::ZPR_4bRegClassID:
1087 RK = RegKind::SVEDataVector;
1088 break;
1089 case AArch64::PPRRegClassID:
1090 case AArch64::PPR_3bRegClassID:
1091 RK = RegKind::SVEPredicateVector;
1092 break;
1093 default:
1094 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1094)
;
1095 }
1096
1097 return (Kind == k_Register && Reg.Kind == RK) &&
1098 AArch64MCRegisterClasses[Class].contains(getReg());
1099 }
1100
1101 template <unsigned Class> bool isFPRasZPR() const {
1102 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1103 AArch64MCRegisterClasses[Class].contains(getReg());
1104 }
1105
1106 template <int ElementWidth, unsigned Class>
1107 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1108 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1109 return DiagnosticPredicateTy::NoMatch;
1110
1111 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1112 return DiagnosticPredicateTy::Match;
1113
1114 return DiagnosticPredicateTy::NearMatch;
1115 }
1116
1117 template <int ElementWidth, unsigned Class>
1118 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1119 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1120 return DiagnosticPredicateTy::NoMatch;
1121
1122 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1123 return DiagnosticPredicateTy::Match;
1124
1125 return DiagnosticPredicateTy::NearMatch;
1126 }
1127
1128 template <int ElementWidth, unsigned Class,
1129 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1130 bool ShiftWidthAlwaysSame>
1131 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1132 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1133 if (!VectorMatch.isMatch())
1134 return DiagnosticPredicateTy::NoMatch;
1135
1136 // Give a more specific diagnostic when the user has explicitly typed in
1137 // a shift-amount that does not match what is expected, but for which
1138 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1139 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1140 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1141 ShiftExtendTy == AArch64_AM::SXTW) &&
1142 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1143 return DiagnosticPredicateTy::NoMatch;
1144
1145 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1146 return DiagnosticPredicateTy::Match;
1147
1148 return DiagnosticPredicateTy::NearMatch;
1149 }
1150
1151 bool isGPR32as64() const {
1152 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1153 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1154 }
1155
1156 bool isGPR64as32() const {
1157 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1158 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1159 }
1160
1161 bool isWSeqPair() const {
1162 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1163 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1164 Reg.RegNum);
1165 }
1166
1167 bool isXSeqPair() const {
1168 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1169 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1170 Reg.RegNum);
1171 }
1172
1173 template<int64_t Angle, int64_t Remainder>
1174 DiagnosticPredicate isComplexRotation() const {
1175 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1176
1177 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1178 if (!CE) return DiagnosticPredicateTy::NoMatch;
1179 uint64_t Value = CE->getValue();
1180
1181 if (Value % Angle == Remainder && Value <= 270)
1182 return DiagnosticPredicateTy::Match;
1183 return DiagnosticPredicateTy::NearMatch;
1184 }
1185
1186 template <unsigned RegClassID> bool isGPR64() const {
1187 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1188 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1189 }
1190
1191 template <unsigned RegClassID, int ExtWidth>
1192 DiagnosticPredicate isGPR64WithShiftExtend() const {
1193 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1194 return DiagnosticPredicateTy::NoMatch;
1195
1196 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1197 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1198 return DiagnosticPredicateTy::Match;
1199 return DiagnosticPredicateTy::NearMatch;
1200 }
1201
1202 /// Is this a vector list with the type implicit (presumably attached to the
1203 /// instruction itself)?
1204 template <RegKind VectorKind, unsigned NumRegs>
1205 bool isImplicitlyTypedVectorList() const {
1206 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1207 VectorList.NumElements == 0 &&
1208 VectorList.RegisterKind == VectorKind;
1209 }
1210
1211 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1212 unsigned ElementWidth>
1213 bool isTypedVectorList() const {
1214 if (Kind != k_VectorList)
1215 return false;
1216 if (VectorList.Count != NumRegs)
1217 return false;
1218 if (VectorList.RegisterKind != VectorKind)
1219 return false;
1220 if (VectorList.ElementWidth != ElementWidth)
1221 return false;
1222 return VectorList.NumElements == NumElements;
1223 }
1224
1225 template <int Min, int Max>
1226 DiagnosticPredicate isVectorIndex() const {
1227 if (Kind != k_VectorIndex)
1228 return DiagnosticPredicateTy::NoMatch;
1229 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1230 return DiagnosticPredicateTy::Match;
1231 return DiagnosticPredicateTy::NearMatch;
1232 }
1233
1234 bool isToken() const override { return Kind == k_Token; }
1235
1236 bool isTokenEqual(StringRef Str) const {
1237 return Kind == k_Token && getToken() == Str;
1238 }
1239 bool isSysCR() const { return Kind == k_SysCR; }
1240 bool isPrefetch() const { return Kind == k_Prefetch; }
1241 bool isPSBHint() const { return Kind == k_PSBHint; }
1242 bool isBTIHint() const { return Kind == k_BTIHint; }
1243 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1244 bool isShifter() const {
1245 if (!isShiftExtend())
1246 return false;
1247
1248 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1249 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1250 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1251 ST == AArch64_AM::MSL);
1252 }
1253
1254 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1255 if (Kind != k_FPImm)
1256 return DiagnosticPredicateTy::NoMatch;
1257
1258 if (getFPImmIsExact()) {
1259 // Lookup the immediate from table of supported immediates.
1260 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1261 assert(Desc && "Unknown enum value")((Desc && "Unknown enum value") ? static_cast<void
> (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1261, __PRETTY_FUNCTION__))
;
1262
1263 // Calculate its FP value.
1264 APFloat RealVal(APFloat::IEEEdouble());
1265 auto StatusOrErr =
1266 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1267 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1268 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1268)
;
1269
1270 if (getFPImm().bitwiseIsEqual(RealVal))
1271 return DiagnosticPredicateTy::Match;
1272 }
1273
1274 return DiagnosticPredicateTy::NearMatch;
1275 }
1276
1277 template <unsigned ImmA, unsigned ImmB>
1278 DiagnosticPredicate isExactFPImm() const {
1279 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1280 if ((Res = isExactFPImm<ImmA>()))
1281 return DiagnosticPredicateTy::Match;
1282 if ((Res = isExactFPImm<ImmB>()))
1283 return DiagnosticPredicateTy::Match;
1284 return Res;
1285 }
1286
1287 bool isExtend() const {
1288 if (!isShiftExtend())
1289 return false;
1290
1291 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1292 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1293 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1294 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1295 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1296 ET == AArch64_AM::LSL) &&
1297 getShiftExtendAmount() <= 4;
1298 }
1299
1300 bool isExtend64() const {
1301 if (!isExtend())
1302 return false;
1303 // Make sure the extend expects a 32-bit source register.
1304 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1305 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1306 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1307 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1308 }
1309
1310 bool isExtendLSL64() const {
1311 if (!isExtend())
1312 return false;
1313 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1314 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1315 ET == AArch64_AM::LSL) &&
1316 getShiftExtendAmount() <= 4;
1317 }
1318
1319 template<int Width> bool isMemXExtend() const {
1320 if (!isExtend())
1321 return false;
1322 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1323 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1324 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1325 getShiftExtendAmount() == 0);
1326 }
1327
1328 template<int Width> bool isMemWExtend() const {
1329 if (!isExtend())
1330 return false;
1331 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1332 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1333 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1334 getShiftExtendAmount() == 0);
1335 }
1336
1337 template <unsigned width>
1338 bool isArithmeticShifter() const {
1339 if (!isShifter())
1340 return false;
1341
1342 // An arithmetic shifter is LSL, LSR, or ASR.
1343 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1344 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1345 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1346 }
1347
1348 template <unsigned width>
1349 bool isLogicalShifter() const {
1350 if (!isShifter())
1351 return false;
1352
1353 // A logical shifter is LSL, LSR, ASR or ROR.
1354 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1355 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1356 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1357 getShiftExtendAmount() < width;
1358 }
1359
1360 bool isMovImm32Shifter() const {
1361 if (!isShifter())
1362 return false;
1363
1364 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1365 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1366 if (ST != AArch64_AM::LSL)
1367 return false;
1368 uint64_t Val = getShiftExtendAmount();
1369 return (Val == 0 || Val == 16);
1370 }
1371
1372 bool isMovImm64Shifter() const {
1373 if (!isShifter())
1374 return false;
1375
1376 // A MOVi shifter is LSL of 0 or 16.
1377 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1378 if (ST != AArch64_AM::LSL)
1379 return false;
1380 uint64_t Val = getShiftExtendAmount();
1381 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1382 }
1383
1384 bool isLogicalVecShifter() const {
1385 if (!isShifter())
1386 return false;
1387
1388 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1389 unsigned Shift = getShiftExtendAmount();
1390 return getShiftExtendType() == AArch64_AM::LSL &&
1391 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1392 }
1393
1394 bool isLogicalVecHalfWordShifter() const {
1395 if (!isLogicalVecShifter())
1396 return false;
1397
1398 // A logical vector shifter is a left shift by 0 or 8.
1399 unsigned Shift = getShiftExtendAmount();
1400 return getShiftExtendType() == AArch64_AM::LSL &&
1401 (Shift == 0 || Shift == 8);
1402 }
1403
1404 bool isMoveVecShifter() const {
1405 if (!isShiftExtend())
1406 return false;
1407
1408 // A logical vector shifter is a left shift by 8 or 16.
1409 unsigned Shift = getShiftExtendAmount();
1410 return getShiftExtendType() == AArch64_AM::MSL &&
1411 (Shift == 8 || Shift == 16);
1412 }
1413
1414 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1415 // to LDUR/STUR when the offset is not legal for the former but is for
1416 // the latter. As such, in addition to checking for being a legal unscaled
1417 // address, also check that it is not a legal scaled address. This avoids
1418 // ambiguity in the matcher.
1419 template<int Width>
1420 bool isSImm9OffsetFB() const {
1421 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1422 }
1423
1424 bool isAdrpLabel() const {
1425 // Validation was handled during parsing, so we just sanity check that
1426 // something didn't go haywire.
1427 if (!isImm())
1428 return false;
1429
1430 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1431 int64_t Val = CE->getValue();
1432 int64_t Min = - (4096 * (1LL << (21 - 1)));
1433 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1434 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1435 }
1436
1437 return true;
1438 }
1439
1440 bool isAdrLabel() const {
1441 // Validation was handled during parsing, so we just sanity check that
1442 // something didn't go haywire.
1443 if (!isImm())
1444 return false;
1445
1446 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1447 int64_t Val = CE->getValue();
1448 int64_t Min = - (1LL << (21 - 1));
1449 int64_t Max = ((1LL << (21 - 1)) - 1);
1450 return Val >= Min && Val <= Max;
1451 }
1452
1453 return true;
1454 }
1455
1456 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1457 // Add as immediates when possible. Null MCExpr = 0.
1458 if (!Expr)
1459 Inst.addOperand(MCOperand::createImm(0));
1460 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1461 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1462 else
1463 Inst.addOperand(MCOperand::createExpr(Expr));
1464 }
1465
1466 void addRegOperands(MCInst &Inst, unsigned N) const {
1467 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1467, __PRETTY_FUNCTION__))
;
1468 Inst.addOperand(MCOperand::createReg(getReg()));
1469 }
1470
1471 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1472 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1472, __PRETTY_FUNCTION__))
;
1473 assert(((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1474, __PRETTY_FUNCTION__))
1474 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1474, __PRETTY_FUNCTION__))
;
1475
1476 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1477 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1478 RI->getEncodingValue(getReg()));
1479
1480 Inst.addOperand(MCOperand::createReg(Reg));
1481 }
1482
1483 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1484 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1484, __PRETTY_FUNCTION__))
;
1485 assert(((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1486, __PRETTY_FUNCTION__))
1486 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1486, __PRETTY_FUNCTION__))
;
1487
1488 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1489 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1490 RI->getEncodingValue(getReg()));
1491
1492 Inst.addOperand(MCOperand::createReg(Reg));
1493 }
1494
1495 template <int Width>
1496 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1497 unsigned Base;
1498 switch (Width) {
1499 case 8: Base = AArch64::B0; break;
1500 case 16: Base = AArch64::H0; break;
1501 case 32: Base = AArch64::S0; break;
1502 case 64: Base = AArch64::D0; break;
1503 case 128: Base = AArch64::Q0; break;
1504 default:
1505 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1505)
;
1506 }
1507 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1508 }
1509
1510 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1511 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1511, __PRETTY_FUNCTION__))
;
1512 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1513, __PRETTY_FUNCTION__))
1513 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1513, __PRETTY_FUNCTION__))
;
1514 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1515 }
1516
1517 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1518 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1518, __PRETTY_FUNCTION__))
;
1519 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1520, __PRETTY_FUNCTION__))
1520 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1520, __PRETTY_FUNCTION__))
;
1521 Inst.addOperand(MCOperand::createReg(getReg()));
1522 }
1523
1524 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1525 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1525, __PRETTY_FUNCTION__))
;
1526 Inst.addOperand(MCOperand::createReg(getReg()));
1527 }
1528
// Selects which register-tuple table addVectorListOperands indexes into.
enum VecListIndexType {
  VecListIdx_DReg = 0,
  VecListIdx_QReg = 1,
  VecListIdx_ZReg = 2,
};
1534
1535 template <VecListIndexType RegTy, unsigned NumRegs>
1536 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1537 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1537, __PRETTY_FUNCTION__))
;
1538 static const unsigned FirstRegs[][5] = {
1539 /* DReg */ { AArch64::Q0,
1540 AArch64::D0, AArch64::D0_D1,
1541 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1542 /* QReg */ { AArch64::Q0,
1543 AArch64::Q0, AArch64::Q0_Q1,
1544 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1545 /* ZReg */ { AArch64::Z0,
1546 AArch64::Z0, AArch64::Z0_Z1,
1547 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1548 };
1549
1550 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1551, __PRETTY_FUNCTION__))
1551 " NumRegs must be <= 4 for ZRegs")(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1551, __PRETTY_FUNCTION__))
;
1552
1553 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1554 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1555 FirstRegs[(unsigned)RegTy][0]));
1556 }
1557
1558 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1559 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1559, __PRETTY_FUNCTION__))
;
1560 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1561 }
1562
1563 template <unsigned ImmIs0, unsigned ImmIs1>
1564 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1565 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1565, __PRETTY_FUNCTION__))
;
1566 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")((bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand"
) ? static_cast<void> (0) : __assert_fail ("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1566, __PRETTY_FUNCTION__))
;
1567 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1568 }
1569
1570 void addImmOperands(MCInst &Inst, unsigned N) const {
1571 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1571, __PRETTY_FUNCTION__))
;
1572 // If this is a pageoff symrefexpr with an addend, adjust the addend
1573 // to be only the page-offset portion. Otherwise, just add the expr
1574 // as-is.
1575 addExpr(Inst, getImm());
1576 }
1577
1578 template <int Shift>
1579 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1580 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1580, __PRETTY_FUNCTION__))
;
1581 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1582 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1583 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1584 } else if (isShiftedImm()) {
1585 addExpr(Inst, getShiftedImmVal());
1586 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1587 } else {
1588 addExpr(Inst, getImm());
1589 Inst.addOperand(MCOperand::createImm(0));
1590 }
1591 }
1592
1593 template <int Shift>
1594 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1595 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1595, __PRETTY_FUNCTION__))
;
1596 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1597 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1598 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1599 } else
1600 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1600)
;
1601 }
1602
1603 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1604 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1604, __PRETTY_FUNCTION__))
;
1605 Inst.addOperand(MCOperand::createImm(getCondCode()));
1606 }
1607
1608 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1609 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1609, __PRETTY_FUNCTION__))
;
1610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1611 if (!MCE)
1612 addExpr(Inst, getImm());
1613 else
1614 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1615 }
1616
1617 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1618 addImmOperands(Inst, N);
1619 }
1620
1621 template<int Scale>
1622 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1623 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1623, __PRETTY_FUNCTION__))
;
1624 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1625
1626 if (!MCE) {
1627 Inst.addOperand(MCOperand::createExpr(getImm()));
1628 return;
1629 }
1630 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1631 }
1632
1633 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1634 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1634, __PRETTY_FUNCTION__))
;
1635 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1636 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1637 }
1638
1639 template <int Scale>
1640 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1641 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1641, __PRETTY_FUNCTION__))
;
1642 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1643 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1644 }
1645
1646 template <typename T>
1647 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1648 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1648, __PRETTY_FUNCTION__))
;
1649 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1650 std::make_unsigned_t<T> Val = MCE->getValue();
1651 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1652 Inst.addOperand(MCOperand::createImm(encoding));
1653 }
1654
1655 template <typename T>
1656 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1657 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1657, __PRETTY_FUNCTION__))
;
1658 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1659 std::make_unsigned_t<T> Val = ~MCE->getValue();
1660 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1661 Inst.addOperand(MCOperand::createImm(encoding));
1662 }
1663
1664 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1665 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1665, __PRETTY_FUNCTION__))
;
1666 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1667 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1668 Inst.addOperand(MCOperand::createImm(encoding));
1669 }
1670
1671 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1672 // Branch operands don't encode the low bits, so shift them off
1673 // here. If it's a label, however, just put it on directly as there's
1674 // not enough information now to do anything.
1675 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1675, __PRETTY_FUNCTION__))
;
1676 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1677 if (!MCE) {
1678 addExpr(Inst, getImm());
1679 return;
1680 }
1681 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1681, __PRETTY_FUNCTION__))
;
1682 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1683 }
1684
1685 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1686 // Branch operands don't encode the low bits, so shift them off
1687 // here. If it's a label, however, just put it on directly as there's
1688 // not enough information now to do anything.
1689 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1689, __PRETTY_FUNCTION__))
;
1690 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1691 if (!MCE) {
1692 addExpr(Inst, getImm());
1693 return;
1694 }
1695 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1695, __PRETTY_FUNCTION__))
;
1696 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1697 }
1698
1699 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1700 // Branch operands don't encode the low bits, so shift them off
1701 // here. If it's a label, however, just put it on directly as there's
1702 // not enough information now to do anything.
1703 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1703, __PRETTY_FUNCTION__))
;
1704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1705 if (!MCE) {
1706 addExpr(Inst, getImm());
1707 return;
1708 }
1709 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1709, __PRETTY_FUNCTION__))
;
1710 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1711 }
1712
1713 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1714 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1714, __PRETTY_FUNCTION__))
;
1715 Inst.addOperand(MCOperand::createImm(
1716 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1717 }
1718
1719 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1720 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1720, __PRETTY_FUNCTION__))
;
1721 Inst.addOperand(MCOperand::createImm(getBarrier()));
1722 }
1723
1724 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1725 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1725, __PRETTY_FUNCTION__))
;
1726
1727 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1728 }
1729
1730 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1731 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1731, __PRETTY_FUNCTION__))
;
1732
1733 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1734 }
1735
1736 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1737 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1737, __PRETTY_FUNCTION__))
;
1738
1739 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1740 }
1741
1742 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1743 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1743, __PRETTY_FUNCTION__))
;
1744
1745 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1746 }
1747
1748 void addSysCROperands(MCInst &Inst, unsigned N) const {
1749 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1749, __PRETTY_FUNCTION__))
;
1750 Inst.addOperand(MCOperand::createImm(getSysCR()));
1751 }
1752
1753 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1754 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1754, __PRETTY_FUNCTION__))
;
1755 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1756 }
1757
1758 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1759 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1759, __PRETTY_FUNCTION__))
;
1760 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1761 }
1762
1763 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1764 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1764, __PRETTY_FUNCTION__))
;
1765 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1766 }
1767
1768 void addShifterOperands(MCInst &Inst, unsigned N) const {
1769 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1769, __PRETTY_FUNCTION__))
;
1770 unsigned Imm =
1771 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1772 Inst.addOperand(MCOperand::createImm(Imm));
1773 }
1774
1775 void addExtendOperands(MCInst &Inst, unsigned N) const {
1776 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1776, __PRETTY_FUNCTION__))
;
1777 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1778 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1779 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1780 Inst.addOperand(MCOperand::createImm(Imm));
1781 }
1782
1783 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1784 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1784, __PRETTY_FUNCTION__))
;
1785 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1786 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1787 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1788 Inst.addOperand(MCOperand::createImm(Imm));
1789 }
1790
1791 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1792 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1792, __PRETTY_FUNCTION__))
;
1793 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1794 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1795 Inst.addOperand(MCOperand::createImm(IsSigned));
1796 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1797 }
1798
1799 // For 8-bit load/store instructions with a register offset, both the
1800 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1801 // they're disambiguated by whether the shift was explicit or implicit rather
1802 // than its size.
1803 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1804 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1804, __PRETTY_FUNCTION__))
;
1805 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1806 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1807 Inst.addOperand(MCOperand::createImm(IsSigned));
1808 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1809 }
1810
1811 template<int Shift>
1812 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1813 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1813, __PRETTY_FUNCTION__))
;
1814
1815 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1816 if (CE) {
1817 uint64_t Value = CE->getValue();
1818 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1819 } else {
1820 addExpr(Inst, getImm());
1821 }
1822 }
1823
1824 template<int Shift>
1825 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1826 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1826, __PRETTY_FUNCTION__))
;
1827
1828 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1829 uint64_t Value = CE->getValue();
1830 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1831 }
1832
1833 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1834 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1834, __PRETTY_FUNCTION__))
;
1835 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1836 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1837 }
1838
1839 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1840 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1840, __PRETTY_FUNCTION__))
;
1841 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1842 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1843 }
1844
1845 void print(raw_ostream &OS) const override; // Debug-print this operand.
1846
1847 static std::unique_ptr<AArch64Operand>
1848 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1849 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1850 Op->Tok.Data = Str.data();
1851 Op->Tok.Length = Str.size();
1852 Op->Tok.IsSuffix = IsSuffix;
1853 Op->StartLoc = S;
1854 Op->EndLoc = S;
1855 return Op;
1856 }
1857
1858 static std::unique_ptr<AArch64Operand>
1859 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1860 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1861 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1862 unsigned ShiftAmount = 0,
1863 unsigned HasExplicitAmount = false) {
1864 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1865 Op->Reg.RegNum = RegNum;
1866 Op->Reg.Kind = Kind;
1867 Op->Reg.ElementWidth = 0;
1868 Op->Reg.EqualityTy = EqTy;
1869 Op->Reg.ShiftExtend.Type = ExtTy;
1870 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1871 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1872 Op->StartLoc = S;
1873 Op->EndLoc = E;
1874 return Op;
1875 }
1876
1877 static std::unique_ptr<AArch64Operand>
1878 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1879 SMLoc S, SMLoc E, MCContext &Ctx,
1880 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1881 unsigned ShiftAmount = 0,
1882 unsigned HasExplicitAmount = false) {
1883 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1885, __PRETTY_FUNCTION__))
1884 Kind == RegKind::SVEPredicateVector) &&(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1885, __PRETTY_FUNCTION__))
1885 "Invalid vector kind")(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1885, __PRETTY_FUNCTION__))
;
1886 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1887 HasExplicitAmount);
1888 Op->Reg.ElementWidth = ElementWidth;
1889 return Op;
1890 }
1891
1892 static std::unique_ptr<AArch64Operand>
1893 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1894 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1895 MCContext &Ctx) {
1896 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1897 Op->VectorList.RegNum = RegNum;
1898 Op->VectorList.Count = Count;
1899 Op->VectorList.NumElements = NumElements;
1900 Op->VectorList.ElementWidth = ElementWidth;
1901 Op->VectorList.RegisterKind = RegisterKind;
1902 Op->StartLoc = S;
1903 Op->EndLoc = E;
1904 return Op;
1905 }
1906
1907 static std::unique_ptr<AArch64Operand>
1908 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1909 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1910 Op->VectorIndex.Val = Idx;
1911 Op->StartLoc = S;
1912 Op->EndLoc = E;
1913 return Op;
1914 }
1915
1916 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1917 SMLoc E, MCContext &Ctx) {
1918 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1919 Op->Imm.Val = Val;
1920 Op->StartLoc = S;
1921 Op->EndLoc = E;
1922 return Op;
1923 }
1924
1925 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1926 unsigned ShiftAmount,
1927 SMLoc S, SMLoc E,
1928 MCContext &Ctx) {
1929 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1930 Op->ShiftedImm .Val = Val;
1931 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1932 Op->StartLoc = S;
1933 Op->EndLoc = E;
1934 return Op;
1935 }
1936
1937 static std::unique_ptr<AArch64Operand>
1938 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1939 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
1940 Op->CondCode.Code = Code;
1941 Op->StartLoc = S;
1942 Op->EndLoc = E;
1943 return Op;
1944 }
1945
1946 static std::unique_ptr<AArch64Operand>
1947 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1948 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
1949 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1950 Op->FPImm.IsExact = IsExact;
1951 Op->StartLoc = S;
1952 Op->EndLoc = S;
1953 return Op;
1954 }
1955
1956 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1957 StringRef Str,
1958 SMLoc S,
1959 MCContext &Ctx) {
1960 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
1961 Op->Barrier.Val = Val;
1962 Op->Barrier.Data = Str.data();
1963 Op->Barrier.Length = Str.size();
1964 Op->StartLoc = S;
1965 Op->EndLoc = S;
1966 return Op;
1967 }
1968
1969 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1970 uint32_t MRSReg,
1971 uint32_t MSRReg,
1972 uint32_t PStateField,
1973 MCContext &Ctx) {
1974 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
1975 Op->SysReg.Data = Str.data();
1976 Op->SysReg.Length = Str.size();
1977 Op->SysReg.MRSReg = MRSReg;
1978 Op->SysReg.MSRReg = MSRReg;
1979 Op->SysReg.PStateField = PStateField;
1980 Op->StartLoc = S;
1981 Op->EndLoc = S;
1982 return Op;
1983 }
1984
1985 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1986 SMLoc E, MCContext &Ctx) {
1987 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
1988 Op->SysCRImm.Val = Val;
1989 Op->StartLoc = S;
1990 Op->EndLoc = E;
1991 return Op;
1992 }
1993
1994 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1995 StringRef Str,
1996 SMLoc S,
1997 MCContext &Ctx) {
1998 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
1999 Op->Prefetch.Val = Val;
2000 Op->Barrier.Data = Str.data();
2001 Op->Barrier.Length = Str.size();
2002 Op->StartLoc = S;
2003 Op->EndLoc = S;
2004 return Op;
2005 }
2006
2007 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2008 StringRef Str,
2009 SMLoc S,
2010 MCContext &Ctx) {
2011 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2012 Op->PSBHint.Val = Val;
2013 Op->PSBHint.Data = Str.data();
2014 Op->PSBHint.Length = Str.size();
2015 Op->StartLoc = S;
2016 Op->EndLoc = S;
2017 return Op;
2018 }
2019
2020 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2021 StringRef Str,
2022 SMLoc S,
2023 MCContext &Ctx) {
2024 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2025 Op->BTIHint.Val = Val << 1 | 32;
2026 Op->BTIHint.Data = Str.data();
2027 Op->BTIHint.Length = Str.size();
2028 Op->StartLoc = S;
2029 Op->EndLoc = S;
2030 return Op;
2031 }
2032
2033 static std::unique_ptr<AArch64Operand>
2034 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2035 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2036 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2037 Op->ShiftExtend.Type = ShOp;
2038 Op->ShiftExtend.Amount = Val;
2039 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2040 Op->StartLoc = S;
2041 Op->EndLoc = E;
2042 return Op;
2043 }
2044};
2045
2046} // end anonymous namespace.
2047
// Debug/diagnostic dump of an operand. Dispatches on the discriminant Kind
// and renders each variant in a "<tag ...>" form; accessors assert that the
// union member being read matches Kind.
2048void AArch64Operand::print(raw_ostream &OS) const {
2049 switch (Kind) {
2050 case k_FPImm:
// Raw bit pattern of the FP immediate; flag values that did not convert
// exactly from their textual spelling.
2051 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2052 if (!getFPImmIsExact())
2053 OS << " (inexact)";
2054 OS << ">";
2055 break;
2056 case k_Barrier: {
// Named barrier options print by name; raw immediates fall back to the
// numeric encoding.
2057 StringRef Name = getBarrierName();
2058 if (!Name.empty())
2059 OS << "<barrier " << Name << ">";
2060 else
2061 OS << "<barrier invalid #" << getBarrier() << ">";
2062 break;
2063 }
2064 case k_Immediate:
2065 OS << *getImm();
2066 break;
2067 case k_ShiftedImm: {
2068 unsigned Shift = getShiftedImmShift();
2069 OS << "<shiftedimm ";
2070 OS << *getShiftedImmVal();
2071 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2072 break;
2073 }
2074 case k_CondCode:
2075 OS << "<condcode " << getCondCode() << ">";
2076 break;
2077 case k_VectorList: {
// Lists are Count consecutive registers from the start register; print
// each register number in turn.
2078 OS << "<vectorlist ";
2079 unsigned Reg = getVectorListStart();
2080 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2081 OS << Reg + i << " ";
2082 OS << ">";
2083 break;
2084 }
2085 case k_VectorIndex:
2086 OS << "<vectorindex " << getVectorIndex() << ">";
2087 break;
2088 case k_SysReg:
2089 OS << "<sysreg: " << getSysReg() << '>';
2090 break;
2091 case k_Token:
2092 OS << "'" << getToken() << "'";
2093 break;
2094 case k_SysCR:
2095 OS << "c" << getSysCR();
2096 break;
2097 case k_Prefetch: {
2098 StringRef Name = getPrefetchName();
2099 if (!Name.empty())
2100 OS << "<prfop " << Name << ">";
2101 else
2102 OS << "<prfop invalid #" << getPrefetch() << ">";
2103 break;
2104 }
2105 case k_PSBHint:
2106 OS << getPSBHintName();
2107 break;
2108 case k_BTIHint:
2109 OS << getBTIHintName();
2110 break;
2111 case k_Register:
2112 OS << "<register " << getReg() << ">";
// A register with an attached shift/extend also prints that suffix by
// deliberately falling through to the k_ShiftExtend case below.
2113 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2114 break;
2115 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2116 case k_ShiftExtend:
2117 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2118 << getShiftExtendAmount();
// "<imp>" marks an implicit (not explicitly written) amount.
2119 if (!hasShiftExtendAmount())
2120 OS << "<imp>";
2121 OS << '>';
2122 break;
2123 }
2124}
2125
2126/// @name Auto-generated Match Functions
2127/// {
2128
2129static unsigned MatchRegisterName(StringRef Name);
2130
2131/// }
2132
// Map a (case-insensitive) Neon vector register name "v0".."v31" to the
// corresponding Q-register enumerator. Returns 0 for any other spelling,
// which callers treat as "no match".
2133 static unsigned MatchNeonVectorRegName(StringRef Name) {
2134 return StringSwitch<unsigned>(Name.lower())
2135 .Case("v0", AArch64::Q0)
2136 .Case("v1", AArch64::Q1)
2137 .Case("v2", AArch64::Q2)
2138 .Case("v3", AArch64::Q3)
2139 .Case("v4", AArch64::Q4)
2140 .Case("v5", AArch64::Q5)
2141 .Case("v6", AArch64::Q6)
2142 .Case("v7", AArch64::Q7)
2143 .Case("v8", AArch64::Q8)
2144 .Case("v9", AArch64::Q9)
2145 .Case("v10", AArch64::Q10)
2146 .Case("v11", AArch64::Q11)
2147 .Case("v12", AArch64::Q12)
2148 .Case("v13", AArch64::Q13)
2149 .Case("v14", AArch64::Q14)
2150 .Case("v15", AArch64::Q15)
2151 .Case("v16", AArch64::Q16)
2152 .Case("v17", AArch64::Q17)
2153 .Case("v18", AArch64::Q18)
2154 .Case("v19", AArch64::Q19)
2155 .Case("v20", AArch64::Q20)
2156 .Case("v21", AArch64::Q21)
2157 .Case("v22", AArch64::Q22)
2158 .Case("v23", AArch64::Q23)
2159 .Case("v24", AArch64::Q24)
2160 .Case("v25", AArch64::Q25)
2161 .Case("v26", AArch64::Q26)
2162 .Case("v27", AArch64::Q27)
2163 .Case("v28", AArch64::Q28)
2164 .Case("v29", AArch64::Q29)
2165 .Case("v30", AArch64::Q30)
2166 .Case("v31", AArch64::Q31)
2167 .Default(0);
2168}
2169
2170/// Returns an optional pair of (#elements, element-width) if Suffix
2171/// is a valid vector kind. Where the number of elements in a vector
2172/// or the vector width is implicit or explicitly unknown (but still a
2173/// valid suffix kind), 0 is used.
// Decode a vector arrangement suffix (e.g. ".4s") into {#elements, width}.
// 0 in either slot means "implicit/unknown but still a valid suffix";
// None means the suffix is not valid for the given register kind.
2174static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2175 RegKind VectorKind) {
// {-1, -1} is the internal "no match" sentinel, turned into None below.
2176 std::pair<int, int> Res = {-1, -1};
2177
2178 switch (VectorKind) {
2179 case RegKind::NeonVector:
2180 Res =
2181 StringSwitch<std::pair<int, int>>(Suffix.lower())
2182 .Case("", {0, 0})
2183 .Case(".1d", {1, 64})
2184 .Case(".1q", {1, 128})
2185 // '.2h' needed for fp16 scalar pairwise reductions
2186 .Case(".2h", {2, 16})
2187 .Case(".2s", {2, 32})
2188 .Case(".2d", {2, 64})
2189 // '.4b' is another special case for the ARMv8.2a dot product
2190 // operand
2191 .Case(".4b", {4, 8})
2192 .Case(".4h", {4, 16})
2193 .Case(".4s", {4, 32})
2194 .Case(".8b", {8, 8})
2195 .Case(".8h", {8, 16})
2196 .Case(".16b", {16, 8})
2197 // Accept the width neutral ones, too, for verbose syntax. If those
2198 // aren't used in the right places, the token operand won't match so
2199 // all will work out.
2200 .Case(".b", {0, 8})
2201 .Case(".h", {0, 16})
2202 .Case(".s", {0, 32})
2203 .Case(".d", {0, 64})
2204 .Default({-1, -1});
2205 break;
// SVE suffixes are element-width only; the element count is vector-length
// dependent and therefore always reported as 0.
2206 case RegKind::SVEPredicateVector:
2207 case RegKind::SVEDataVector:
2208 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2209 .Case("", {0, 0})
2210 .Case(".b", {0, 8})
2211 .Case(".h", {0, 16})
2212 .Case(".s", {0, 32})
2213 .Case(".d", {0, 64})
2214 .Case(".q", {0, 128})
2215 .Default({-1, -1});
2216 break;
2217 default:
2218 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2218)
;
2219 }
2220
2221 if (Res == std::make_pair(-1, -1))
2222 return Optional<std::pair<int, int>>();
2223
2224 return Optional<std::pair<int, int>>(Res);
2225}
2226
2227static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2228 return parseVectorKind(Suffix, VectorKind).hasValue();
2229}
2230
// Map a (case-insensitive) SVE data vector name "z0".."z31" to its Z-register
// enumerator; 0 means "not an SVE data vector name".
2231 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2232 return StringSwitch<unsigned>(Name.lower())
2233 .Case("z0", AArch64::Z0)
2234 .Case("z1", AArch64::Z1)
2235 .Case("z2", AArch64::Z2)
2236 .Case("z3", AArch64::Z3)
2237 .Case("z4", AArch64::Z4)
2238 .Case("z5", AArch64::Z5)
2239 .Case("z6", AArch64::Z6)
2240 .Case("z7", AArch64::Z7)
2241 .Case("z8", AArch64::Z8)
2242 .Case("z9", AArch64::Z9)
2243 .Case("z10", AArch64::Z10)
2244 .Case("z11", AArch64::Z11)
2245 .Case("z12", AArch64::Z12)
2246 .Case("z13", AArch64::Z13)
2247 .Case("z14", AArch64::Z14)
2248 .Case("z15", AArch64::Z15)
2249 .Case("z16", AArch64::Z16)
2250 .Case("z17", AArch64::Z17)
2251 .Case("z18", AArch64::Z18)
2252 .Case("z19", AArch64::Z19)
2253 .Case("z20", AArch64::Z20)
2254 .Case("z21", AArch64::Z21)
2255 .Case("z22", AArch64::Z22)
2256 .Case("z23", AArch64::Z23)
2257 .Case("z24", AArch64::Z24)
2258 .Case("z25", AArch64::Z25)
2259 .Case("z26", AArch64::Z26)
2260 .Case("z27", AArch64::Z27)
2261 .Case("z28", AArch64::Z28)
2262 .Case("z29", AArch64::Z29)
2263 .Case("z30", AArch64::Z30)
2264 .Case("z31", AArch64::Z31)
2265 .Default(0);
2266}
2267
// Map a (case-insensitive) SVE predicate register name "p0".."p15" to its
// P-register enumerator; 0 means "not a predicate register name".
2268 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2269 return StringSwitch<unsigned>(Name.lower())
2270 .Case("p0", AArch64::P0)
2271 .Case("p1", AArch64::P1)
2272 .Case("p2", AArch64::P2)
2273 .Case("p3", AArch64::P3)
2274 .Case("p4", AArch64::P4)
2275 .Case("p5", AArch64::P5)
2276 .Case("p6", AArch64::P6)
2277 .Case("p7", AArch64::P7)
2278 .Case("p8", AArch64::P8)
2279 .Case("p9", AArch64::P9)
2280 .Case("p10", AArch64::P10)
2281 .Case("p11", AArch64::P11)
2282 .Case("p12", AArch64::P12)
2283 .Case("p13", AArch64::P13)
2284 .Case("p14", AArch64::P14)
2285 .Case("p15", AArch64::P15)
2286 .Default(0);
2287}
2288
2289bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2290 SMLoc &EndLoc) {
2291 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2292}
2293
2294OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2295 SMLoc &StartLoc,
2296 SMLoc &EndLoc) {
2297 StartLoc = getLoc();
2298 auto Res = tryParseScalarRegister(RegNo);
2299 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2300 return Res;
2301}
2302
2303// Matches a register name or register alias previously defined by '.req'
2304unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2305 RegKind Kind) {
2306 unsigned RegNum = 0;
2307 if ((RegNum = matchSVEDataVectorRegName(Name)))
2308 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2309
2310 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2311 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2312
2313 if ((RegNum = MatchNeonVectorRegName(Name)))
2314 return Kind == RegKind::NeonVector ? RegNum : 0;
2315
2316 // The parsed register must be of RegKind Scalar
2317 if ((RegNum = MatchRegisterName(Name)))
2318 return Kind == RegKind::Scalar ? RegNum : 0;
2319
2320 if (!RegNum) {
2321 // Handle a few common aliases of registers.
2322 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2323 .Case("fp", AArch64::FP)
2324 .Case("lr", AArch64::LR)
2325 .Case("x31", AArch64::XZR)
2326 .Case("w31", AArch64::WZR)
2327 .Default(0))
2328 return Kind == RegKind::Scalar ? RegNum : 0;
2329
2330 // Check for aliases registered via .req. Canonicalize to lower case.
2331 // That's more consistent since register names are case insensitive, and
2332 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2333 auto Entry = RegisterReqs.find(Name.lower());
2334 if (Entry == RegisterReqs.end())
2335 return 0;
2336
2337 // set RegNum if the match is the right kind of register
2338 if (Kind == Entry->getValue().first)
2339 RegNum = Entry->getValue().second;
2340 }
2341 return RegNum;
2342}
2343
2344/// tryParseScalarRegister - Try to parse a register name. The token must be an
2345/// Identifier when called, and if it is a register name the token is eaten and
2346/// the register is added to the operand list.
2347 OperandMatchResultTy
2348 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2349 MCAsmParser &Parser = getParser();
2350 const AsmToken &Tok = Parser.getTok();
// Register names are identifiers; anything else is simply "no match"
// (not an error), leaving the token for other parsers.
2351 if (Tok.isNot(AsmToken::Identifier))
2352 return MatchOperand_NoMatch;
2353
// Register names are case insensitive; canonicalize before matching.
2354 std::string lowerCase = Tok.getString().lower();
2355 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2356 if (Reg == 0)
2357 return MatchOperand_NoMatch;
2358
// Only consume the token once we know it really is a scalar register.
2359 RegNum = Reg;
2360 Parser.Lex(); // Eat identifier token.
2361 return MatchOperand_Success;
2362}
2363
2364/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2365 OperandMatchResultTy
2366 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2367 MCAsmParser &Parser = getParser();
2368 SMLoc S = getLoc();
2369
// The operand must be an identifier of the form cN / CN; all failure
// paths report the same diagnostic.
2370 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2371 Error(S, "Expected cN operand where 0 <= N <= 15");
2372 return MatchOperand_ParseFail;
2373 }
2374
2375 StringRef Tok = Parser.getTok().getIdentifier();
2376 if (Tok[0] != 'c' && Tok[0] != 'C') {
2377 Error(S, "Expected cN operand where 0 <= N <= 15");
2378 return MatchOperand_ParseFail;
2379 }
2380
// Everything after the leading 'c' must be a decimal number in [0, 15].
2381 uint32_t CRNum;
2382 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2383 if (BadNum || CRNum > 15) {
2384 Error(S, "Expected cN operand where 0 <= N <= 15");
2385 return MatchOperand_ParseFail;
2386 }
2387
2388 Parser.Lex(); // Eat identifier token.
2389 Operands.push_back(
2390 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2391 return MatchOperand_Success;
2392}
2393
2394/// tryParsePrefetch - Try to parse a prefetch operand.
// Parse a prefetch operand: either a named hint (e.g. "pldl1keep") or a raw
// immediate. IsSVEPrefetch selects the SVE PRFM tables (and a 4-bit instead
// of 5-bit range).
2395 template <bool IsSVEPrefetch>
2396 OperandMatchResultTy
2397 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2398 MCAsmParser &Parser = getParser();
2399 SMLoc S = getLoc();
2400 const AsmToken &Tok = Parser.getTok();
2401
// Table lookups differ between SVE and base prefetches; both directions
// (name -> encoding, encoding -> name) are needed below.
2402 auto LookupByName = [](StringRef N) {
2403 if (IsSVEPrefetch) {
2404 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2405 return Optional<unsigned>(Res->Encoding);
2406 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2407 return Optional<unsigned>(Res->Encoding);
2408 return Optional<unsigned>();
2409 };
2410
2411 auto LookupByEncoding = [](unsigned E) {
2412 if (IsSVEPrefetch) {
2413 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2414 return Optional<StringRef>(Res->Name);
2415 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2416 return Optional<StringRef>(Res->Name);
2417 return Optional<StringRef>();
2418 };
2419 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2420
2421 // Either an identifier for named values or a 5-bit immediate.
2422 // Eat optional hash.
2423 if (parseOptionalToken(AsmToken::Hash) ||
2424 Tok.is(AsmToken::Integer)) {
2425 const MCExpr *ImmVal;
2426 if (getParser().parseExpression(ImmVal))
2427 return MatchOperand_ParseFail;
2428
// Only constant expressions are acceptable here.
2429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2430 if (!MCE) {
2431 TokError("immediate value expected for prefetch operand");
2432 return MatchOperand_ParseFail;
2433 }
2434 unsigned prfop = MCE->getValue();
2435 if (prfop > MaxVal) {
2436 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2437 "] expected");
2438 return MatchOperand_ParseFail;
2439 }
2440
// Recover the canonical name (if any) so the operand prints nicely.
2441 auto PRFM = LookupByEncoding(MCE->getValue());
2442 Operands.push_back(AArch64Operand::CreatePrefetch(
2443 prfop, PRFM.getValueOr(""), S, getContext()));
2444 return MatchOperand_Success;
2445 }
2446
// No immediate: the operand must be a named hint.
2447 if (Tok.isNot(AsmToken::Identifier)) {
2448 TokError("prefetch hint expected");
2449 return MatchOperand_ParseFail;
2450 }
2451
2452 auto PRFM = LookupByName(Tok.getString());
2453 if (!PRFM) {
2454 TokError("prefetch hint expected");
2455 return MatchOperand_ParseFail;
2456 }
2457
2458 Operands.push_back(AArch64Operand::CreatePrefetch(
2459 *PRFM, Tok.getString(), S, getContext()));
2460 Parser.Lex(); // Eat identifier token.
2461 return MatchOperand_Success;
2462}
2463
2464/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2465 OperandMatchResultTy
2466 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2467 MCAsmParser &Parser = getParser();
2468 SMLoc S = getLoc();
2469 const AsmToken &Tok = Parser.getTok();
// PSB operands are always named (e.g. "csync"); no immediate form.
2470 if (Tok.isNot(AsmToken::Identifier)) {
2471 TokError("invalid operand for instruction");
2472 return MatchOperand_ParseFail;
2473 }
2474
2475 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2476 if (!PSB) {
2477 TokError("invalid operand for instruction");
2478 return MatchOperand_ParseFail;
2479 }
2480
2481 Operands.push_back(AArch64Operand::CreatePSBHint(
2482 PSB->Encoding, Tok.getString(), S, getContext()));
2483 Parser.Lex(); // Eat identifier token.
2484 return MatchOperand_Success;
2485}
2486
2487/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2488 OperandMatchResultTy
2489 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2490 MCAsmParser &Parser = getParser();
2491 SMLoc S = getLoc();
2492 const AsmToken &Tok = Parser.getTok();
// BTI targets are always named ("c", "j", "jc"); no immediate form.
2493 if (Tok.isNot(AsmToken::Identifier)) {
2494 TokError("invalid operand for instruction");
2495 return MatchOperand_ParseFail;
2496 }
2497
2498 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2499 if (!BTI) {
2500 TokError("invalid operand for instruction");
2501 return MatchOperand_ParseFail;
2502 }
2503
// CreateBTIHint folds the encoding into the hint-instruction space.
2504 Operands.push_back(AArch64Operand::CreateBTIHint(
2505 BTI->Encoding, Tok.getString(), S, getContext()));
2506 Parser.Lex(); // Eat identifier token.
2507 return MatchOperand_Success;
2508}
2509
2510/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2511/// instruction.
2512 OperandMatchResultTy
2513 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2514 MCAsmParser &Parser = getParser();
2515 SMLoc S = getLoc();
2516 const MCExpr *Expr = nullptr;
2517
2518 if (Parser.getTok().is(AsmToken::Hash)) {
2519 Parser.Lex(); // Eat hash token.
2520 }
2521
2522 if (parseSymbolicImmVal(Expr))
2523 return MatchOperand_ParseFail;
2524
// Classify the symbol reference so we can validate which relocation
// modifiers are legal on an ADRP; non-symbolic expressions skip this.
2525 AArch64MCExpr::VariantKind ELFRefKind;
2526 MCSymbolRefExpr::VariantKind DarwinRefKind;
2527 int64_t Addend;
2528 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2529 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2530 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2531 // No modifier was specified at all; this is the syntax for an ELF basic
2532 // ADRP relocation (unfortunately).
2533 Expr =
2534 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2535 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2536 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2537 Addend != 0) {
2538 Error(S, "gotpage label reference not allowed an addend");
2539 return MatchOperand_ParseFail;
2540 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2541 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2542 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2543 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2544 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2545 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2546 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2547 // The operand must be an @page or @gotpage qualified symbolref.
2548 Error(S, "page or gotpage label reference expected");
2549 return MatchOperand_ParseFail;
2550 }
2551 }
2552
2553 // We have either a label reference possibly with addend or an immediate. The
2554 // addend is a raw value here. The linker will adjust it to only reference the
2555 // page.
2556 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2557 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2558
2559 return MatchOperand_Success;
2560}
2561
2562/// tryParseAdrLabel - Parse and validate a source label for the ADR
2563/// instruction.
2564 OperandMatchResultTy
2565 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2566 SMLoc S = getLoc();
2567 const MCExpr *Expr = nullptr;
2568
2569 // Leave anything with a bracket to the default for SVE
2570 if (getParser().getTok().is(AsmToken::LBrac))
2571 return MatchOperand_NoMatch;
2572
2573 if (getParser().getTok().is(AsmToken::Hash))
2574 getParser().Lex(); // Eat hash token.
2575
2576 if (parseSymbolicImmVal(Expr))
2577 return MatchOperand_ParseFail;
2578
// Unlike ADRP, ADR only accepts a plain (unqualified) symbol reference;
// any explicit modifier is rejected.
2579 AArch64MCExpr::VariantKind ELFRefKind;
2580 MCSymbolRefExpr::VariantKind DarwinRefKind;
2581 int64_t Addend;
2582 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2583 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2584 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2585 // No modifier was specified at all; this is the syntax for an ELF basic
2586 // ADR relocation (unfortunately).
2587 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2588 } else {
2589 Error(S, "unexpected adr label");
2590 return MatchOperand_ParseFail;
2591 }
2592 }
2593
2594 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2595 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2596 return MatchOperand_Success;
2597}
2598
2599/// tryParseFPImm - A floating point immediate expression operand.
// Parse a floating-point immediate, either as a hex-encoded 8-bit FP value
// ("#0x70") or as a literal real. AddFPZeroAsLiteral controls whether +0.0
// is pushed as the two tokens "#0" ".0" instead of an FP operand.
2600 template<bool AddFPZeroAsLiteral>
2601 OperandMatchResultTy
2602 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2603 MCAsmParser &Parser = getParser();
2604 SMLoc S = getLoc();
2605
2606 bool Hash = parseOptionalToken(AsmToken::Hash);
2607
2608 // Handle negation, as that still comes through as a separate token.
2609 bool isNegative = parseOptionalToken(AsmToken::Minus);
2610
// Without a leading '#', a non-numeric token is just "no match"; with a
// '#' already consumed it must be a number, so it becomes a hard error.
2611 const AsmToken &Tok = Parser.getTok();
2612 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2613 if (!Hash)
2614 return MatchOperand_NoMatch;
2615 TokError("invalid floating point immediate");
2616 return MatchOperand_ParseFail;
2617 }
2618
2619 // Parse hexadecimal representation.
2620 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
// Hex form is the raw 8-bit FPImm encoding; sign is already part of the
// encoding, so an explicit minus is invalid.
2621 if (Tok.getIntVal() > 255 || isNegative) {
2622 TokError("encoded floating point value out of range");
2623 return MatchOperand_ParseFail;
2624 }
2625
2626 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2627 Operands.push_back(
2628 AArch64Operand::CreateFPImm(F, true, S, getContext()));
2629 } else {
2630 // Parse FP representation.
2631 APFloat RealVal(APFloat::IEEEdouble());
2632 auto StatusOrErr =
2633 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2634 if (errorToBool(StatusOrErr.takeError())) {
2635 TokError("invalid floating point representation");
2636 return MatchOperand_ParseFail;
2637 }
2638
2639 if (isNegative)
2640 RealVal.changeSign();
2641
2642 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2643 Operands.push_back(
2644 AArch64Operand::CreateToken("#0", false, S, getContext()));
2645 Operands.push_back(
2646 AArch64Operand::CreateToken(".0", false, S, getContext()));
2647 } else
// opOK from the string conversion means the value is representable
// exactly; record that so range checking can distinguish.
2648 Operands.push_back(AArch64Operand::CreateFPImm(
2649 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2650 }
2651
2652 Parser.Lex(); // Eat the token.
2653
2654 return MatchOperand_Success;
2655}
2656
2657/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2658/// a shift suffix, for example '#1, lsl #12'.
2659 OperandMatchResultTy
2660 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2661 MCAsmParser &Parser = getParser();
2662 SMLoc S = getLoc();
2663
2664 if (Parser.getTok().is(AsmToken::Hash))
2665 Parser.Lex(); // Eat '#'
2666 else if (Parser.getTok().isNot(AsmToken::Integer))
2667 // Operand should start from # or should be integer, emit error otherwise.
2668 return MatchOperand_NoMatch;
2669
2670 const MCExpr *Imm = nullptr;
2671 if (parseSymbolicImmVal(Imm))
2672 return MatchOperand_ParseFail;
// No comma after the immediate: it is a plain k_Immediate operand.
2673 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2674 SMLoc E = Parser.getTok().getLoc();
2675 Operands.push_back(
2676 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2677 return MatchOperand_Success;
2678 }
2679
2680 // Eat ','
2681 Parser.Lex();
2682
2683 // The optional operand must be "lsl #N" where N is non-negative.
2684 if (!Parser.getTok().is(AsmToken::Identifier) ||
2685 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2686 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2687 return MatchOperand_ParseFail;
2688 }
2689
2690 // Eat 'lsl'
2691 Parser.Lex();
2692
// The '#' before the shift amount is optional.
2693 parseOptionalToken(AsmToken::Hash);
2694
2695 if (Parser.getTok().isNot(AsmToken::Integer)) {
2696 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2697 return MatchOperand_ParseFail;
2698 }
2699
2700 int64_t ShiftAmount = Parser.getTok().getIntVal();
2701
2702 if (ShiftAmount < 0) {
2703 Error(Parser.getTok().getLoc(), "positive shift amount required");
2704 return MatchOperand_ParseFail;
2705 }
2706 Parser.Lex(); // Eat the number
2707
2708 // Just in case the optional lsl #0 is used for immediates other than zero.
2709 if (ShiftAmount == 0 && Imm != nullptr) {
2710 SMLoc E = Parser.getTok().getLoc();
2711 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2712 return MatchOperand_Success;
2713 }
2714
2715 SMLoc E = Parser.getTok().getLoc();
2716 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2717 S, E, getContext()));
2718 return MatchOperand_Success;
2719}
2720
2721/// parseCondCodeString - Parse a Condition Code string.
// Decode a (case-insensitive) condition-code mnemonic. The base A64 names
// are always accepted; the SVE condition aliases ("none", "any", ...) map
// onto the same underlying codes and are only accepted when SVE is enabled.
2722 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2723 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2724 .Case("eq", AArch64CC::EQ)
2725 .Case("ne", AArch64CC::NE)
2726 .Case("cs", AArch64CC::HS)
2727 .Case("hs", AArch64CC::HS)
2728 .Case("cc", AArch64CC::LO)
2729 .Case("lo", AArch64CC::LO)
2730 .Case("mi", AArch64CC::MI)
2731 .Case("pl", AArch64CC::PL)
2732 .Case("vs", AArch64CC::VS)
2733 .Case("vc", AArch64CC::VC)
2734 .Case("hi", AArch64CC::HI)
2735 .Case("ls", AArch64CC::LS)
2736 .Case("ge", AArch64CC::GE)
2737 .Case("lt", AArch64CC::LT)
2738 .Case("gt", AArch64CC::GT)
2739 .Case("le", AArch64CC::LE)
2740 .Case("al", AArch64CC::AL)
2741 .Case("nv", AArch64CC::NV)
2742 .Default(AArch64CC::Invalid);
2743
// Second chance: SVE alias spellings, gated on the SVE feature bit.
2744 if (CC == AArch64CC::Invalid &&
2745 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2746 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2747 .Case("none", AArch64CC::EQ)
2748 .Case("any", AArch64CC::NE)
2749 .Case("nlast", AArch64CC::HS)
2750 .Case("last", AArch64CC::LO)
2751 .Case("first", AArch64CC::MI)
2752 .Case("nfrst", AArch64CC::PL)
2753 .Case("pmore", AArch64CC::HI)
2754 .Case("plast", AArch64CC::LS)
2755 .Case("tcont", AArch64CC::GE)
2756 .Case("tstop", AArch64CC::LT)
2757 .Default(AArch64CC::Invalid);
2758
2759 return CC;
2760}
2761
2762/// parseCondCode - Parse a Condition Code operand.
// Parse a condition-code operand and append it to Operands. Returns true on
// error (TokError already emitted). invertCondCode is used for instructions
// like CSINC aliases, where AL/NV cannot be inverted and are rejected.
2763 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2764 bool invertCondCode) {
2765 MCAsmParser &Parser = getParser();
2766 SMLoc S = getLoc();
2767 const AsmToken &Tok = Parser.getTok();
2768 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")((Tok.is(AsmToken::Identifier) && "Token is not an Identifier"
) ? static_cast<void> (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2768, __PRETTY_FUNCTION__))
;
2769
2770 StringRef Cond = Tok.getString();
2771 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2772 if (CC == AArch64CC::Invalid)
2773 return TokError("invalid condition code");
2774 Parser.Lex(); // Eat identifier token.
2775
2776 if (invertCondCode) {
2777 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2778 return TokError("condition codes AL and NV are invalid for this instruction");
2779 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2780 }
2781
2782 Operands.push_back(
2783 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2784 return false;
2785}
2786
2787/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2788/// them if present.
2789 OperandMatchResultTy
2790 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2791 MCAsmParser &Parser = getParser();
2792 const AsmToken &Tok = Parser.getTok();
// First decode the mnemonic; an unknown identifier is simply "no match"
// so other operand parsers get a chance.
2793 std::string LowerID = Tok.getString().lower();
2794 AArch64_AM::ShiftExtendType ShOp =
2795 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2796 .Case("lsl", AArch64_AM::LSL)
2797 .Case("lsr", AArch64_AM::LSR)
2798 .Case("asr", AArch64_AM::ASR)
2799 .Case("ror", AArch64_AM::ROR)
2800 .Case("msl", AArch64_AM::MSL)
2801 .Case("uxtb", AArch64_AM::UXTB)
2802 .Case("uxth", AArch64_AM::UXTH)
2803 .Case("uxtw", AArch64_AM::UXTW)
2804 .Case("uxtx", AArch64_AM::UXTX)
2805 .Case("sxtb", AArch64_AM::SXTB)
2806 .Case("sxth", AArch64_AM::SXTH)
2807 .Case("sxtw", AArch64_AM::SXTW)
2808 .Case("sxtx", AArch64_AM::SXTX)
2809 .Default(AArch64_AM::InvalidShiftExtend);
2810
2811 if (ShOp == AArch64_AM::InvalidShiftExtend)
2812 return MatchOperand_NoMatch;
2813
2814 SMLoc S = Tok.getLoc();
2815 Parser.Lex();
2816
2817 bool Hash = parseOptionalToken(AsmToken::Hash);
2818
// No '#' and no integer follows: shifts require an explicit amount, but
// extend operations default to an implicit #0.
2819 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2820 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2821 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2822 ShOp == AArch64_AM::MSL) {
2823 // We expect a number here.
2824 TokError("expected #imm after shift specifier");
2825 return MatchOperand_ParseFail;
2826 }
2827
2828 // "extend" type operations don't need an immediate, #0 is implicit.
2829 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2830 Operands.push_back(
2831 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2832 return MatchOperand_Success;
2833 }
2834
2835 // Make sure we do actually have a number, identifier or a parenthesized
2836 // expression.
2837 SMLoc E = Parser.getTok().getLoc();
2838 if (!Parser.getTok().is(AsmToken::Integer) &&
2839 !Parser.getTok().is(AsmToken::LParen) &&
2840 !Parser.getTok().is(AsmToken::Identifier)) {
2841 Error(E, "expected integer shift amount");
2842 return MatchOperand_ParseFail;
2843 }
2844
2845 const MCExpr *ImmVal;
2846 if (getParser().parseExpression(ImmVal))
2847 return MatchOperand_ParseFail;
2848
// The amount must fold to a constant at parse time.
2849 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2850 if (!MCE) {
2851 Error(E, "expected constant '#imm' after shift specifier");
2852 return MatchOperand_ParseFail;
2853 }
2854
2855 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2856 Operands.push_back(AArch64Operand::CreateShiftExtend(
2857 ShOp, MCE->getValue(), true, S, E, getContext()));
2858 return MatchOperand_Success;
2859}
2860
// Table mapping an architecture-extension name (as written in assembly
// directives) onto the subtarget feature bits it implies.  An empty
// FeatureBitset marks names that are accepted syntactically but not wired up
// to any backend feature yet.
static const struct Extension {
  const char *Name;             // Extension name as written in assembly.
  const FeatureBitset Features; // Subtarget features the name enables.
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2895
2896static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2897 if (FBS[AArch64::HasV8_1aOps])
2898 Str += "ARMv8.1a";
2899 else if (FBS[AArch64::HasV8_2aOps])
2900 Str += "ARMv8.2a";
2901 else if (FBS[AArch64::HasV8_3aOps])
2902 Str += "ARMv8.3a";
2903 else if (FBS[AArch64::HasV8_4aOps])
2904 Str += "ARMv8.4a";
2905 else if (FBS[AArch64::HasV8_5aOps])
2906 Str += "ARMv8.5a";
2907 else if (FBS[AArch64::HasV8_6aOps])
2908 Str += "ARMv8.6a";
2909 else {
2910 auto ext = std::find_if(std::begin(ExtensionMap),
2911 std::end(ExtensionMap),
2912 [&](const Extension& e)
2913 // Use & in case multiple features are enabled
2914 { return (FBS & e.Features) != FeatureBitset(); }
2915 );
2916
2917 Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2918 }
2919}
2920
2921void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2922 SMLoc S) {
2923 const uint16_t Op2 = Encoding & 7;
2924 const uint16_t Cm = (Encoding & 0x78) >> 3;
2925 const uint16_t Cn = (Encoding & 0x780) >> 7;
2926 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2927
2928 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2929
2930 Operands.push_back(
2931 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2932 Operands.push_back(
2933 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2934 Operands.push_back(
2935 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2936 Expr = MCConstantExpr::create(Op2, getContext());
2937 Operands.push_back(
2938 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2939}
2940
2941/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2942/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  // None of these aliases take a condition-code suffix.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias lowers onto the generic SYS mnemonic.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Each branch looks the operand name up in the matching tablegen'd table,
  // reporting either an unknown name or (when the name exists but the current
  // subtarget lacks the required features) which architecture/extension would
  // provide it.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The prediction-restriction mnemonic selects op2 of the SYS encoding;
    // the table only stores the upper bits.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" (e.g. "ialluis") take no register operand;
  // every other op requires one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3038
3039OperandMatchResultTy
3040AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3041 MCAsmParser &Parser = getParser();
3042 const AsmToken &Tok = Parser.getTok();
3043
3044 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3045 TokError("'csync' operand expected");
3046 return MatchOperand_ParseFail;
3047 // Can be either a #imm style literal or an option name
3048 } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3049 // Immediate operand.
3050 const MCExpr *ImmVal;
3051 SMLoc ExprLoc = getLoc();
3052 if (getParser().parseExpression(ImmVal))
3053 return MatchOperand_ParseFail;
3054 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3055 if (!MCE) {
3056 Error(ExprLoc, "immediate value expected for barrier operand");
3057 return MatchOperand_ParseFail;
3058 }
3059 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
3060 Error(ExprLoc, "barrier operand out of range");
3061 return MatchOperand_ParseFail;
3062 }
3063 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
3064 Operands.push_back(AArch64Operand::CreateBarrier(
3065 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
3066 return MatchOperand_Success;
3067 }
3068
3069 if (Tok.isNot(AsmToken::Identifier)) {
3070 TokError("invalid operand for instruction");
3071 return MatchOperand_ParseFail;
3072 }
3073
3074 auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
3075 // The only valid named option for ISB is 'sy'
3076 auto DB = AArch64DB::lookupDBByName(Tok.getString());
3077 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3078 TokError("'sy' or #imm operand expected");
3079 return MatchOperand_ParseFail;
3080 // The only valid named option for TSB is 'csync'
3081 } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3082 TokError("'csync' operand expected");
3083 return MatchOperand_ParseFail;
3084 } else if (!DB && !TSB) {
3085 TokError("invalid barrier option name");
3086 return MatchOperand_ParseFail;
3087 }
3088
3089 Operands.push_back(AArch64Operand::CreateBarrier(
3090 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
3091 Parser.Lex(); // Consume the option
3092
3093 return MatchOperand_Success;
3094}
3095
3096OperandMatchResultTy
3097AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3098 MCAsmParser &Parser = getParser();
3099 const AsmToken &Tok = Parser.getTok();
3100
3101 if (Tok.isNot(AsmToken::Identifier))
3102 return MatchOperand_NoMatch;
3103
3104 int MRSReg, MSRReg;
3105 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3106 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3107 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3108 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3109 } else
3110 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3111
3112 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3113 unsigned PStateImm = -1;
3114 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3115 PStateImm = PState->Encoding;
3116
3117 Operands.push_back(
3118 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3119 PStateImm, getContext()));
3120 Parser.Lex(); // Eat identifier
3121
3122 return MatchOperand_Success;
3123}
3124
3125/// tryParseNeonVectorRegister - Parse a vector register operand.
3126bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3127 MCAsmParser &Parser = getParser();
3128 if (Parser.getTok().isNot(AsmToken::Identifier))
3129 return true;
3130
3131 SMLoc S = getLoc();
3132 // Check for a vector register specifier first.
3133 StringRef Kind;
3134 unsigned Reg;
3135 OperandMatchResultTy Res =
3136 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3137 if (Res != MatchOperand_Success)
3138 return true;
3139
3140 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3141 if (!KindRes)
3142 return true;
3143
3144 unsigned ElementWidth = KindRes->second;
3145 Operands.push_back(
3146 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3147 S, getLoc(), getContext()));
3148
3149 // If there was an explicit qualifier, that goes on as a literal text
3150 // operand.
3151 if (!Kind.empty())
3152 Operands.push_back(
3153 AArch64Operand::CreateToken(Kind, false, S, getContext()));
3154
3155 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3156}
3157
3158OperandMatchResultTy
3159AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3160 SMLoc SIdx = getLoc();
3161 if (parseOptionalToken(AsmToken::LBrac)) {
3162 const MCExpr *ImmVal;
3163 if (getParser().parseExpression(ImmVal))
3164 return MatchOperand_NoMatch;
3165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3166 if (!MCE) {
3167 TokError("immediate value expected for vector index");
3168 return MatchOperand_ParseFail;;
3169 }
3170
3171 SMLoc E = getLoc();
3172
3173 if (parseToken(AsmToken::RBrac, "']' expected"))
3174 return MatchOperand_ParseFail;;
3175
3176 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3177 E, getContext()));
3178 return MatchOperand_Success;
3179 }
3180
3181 return MatchOperand_NoMatch;
3182}
3183
3184// tryParseVectorRegister - Try to parse a vector register name with
3185// optional kind specifier. If it is a register specifier, eat the token
3186// and return it.
3187OperandMatchResultTy
3188AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3189 RegKind MatchKind) {
3190 MCAsmParser &Parser = getParser();
3191 const AsmToken &Tok = Parser.getTok();
3192
3193 if (Tok.isNot(AsmToken::Identifier))
3194 return MatchOperand_NoMatch;
3195
3196 StringRef Name = Tok.getString();
3197 // If there is a kind specifier, it's separated from the register name by
3198 // a '.'.
3199 size_t Start = 0, Next = Name.find('.');
3200 StringRef Head = Name.slice(Start, Next);
3201 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3202
3203 if (RegNum) {
3204 if (Next != StringRef::npos) {
3205 Kind = Name.slice(Next, StringRef::npos);
3206 if (!isValidVectorKind(Kind, MatchKind)) {
3207 TokError("invalid vector kind qualifier");
3208 return MatchOperand_ParseFail;
3209 }
3210 }
3211 Parser.Lex(); // Eat the register token.
3212
3213 Reg = RegNum;
3214 return MatchOperand_Success;
3215 }
3216
3217 return MatchOperand_NoMatch;
3218}
3219
3220/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3221OperandMatchResultTy
3222AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3223 // Check for a SVE predicate register specifier first.
3224 const SMLoc S = getLoc();
3225 StringRef Kind;
3226 unsigned RegNum;
3227 auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3228 if (Res != MatchOperand_Success)
3229 return Res;
3230
3231 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3232 if (!KindRes)
3233 return MatchOperand_NoMatch;
3234
3235 unsigned ElementWidth = KindRes->second;
3236 Operands.push_back(AArch64Operand::CreateVectorReg(
3237 RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3238 getLoc(), getContext()));
3239
3240 // Not all predicates are followed by a '/m' or '/z'.
3241 MCAsmParser &Parser = getParser();
3242 if (Parser.getTok().isNot(AsmToken::Slash))
3243 return MatchOperand_Success;
3244
3245 // But when they do they shouldn't have an element type suffix.
3246 if (!Kind.empty()) {
3247 Error(S, "not expecting size suffix");
3248 return MatchOperand_ParseFail;
3249 }
3250
3251 // Add a literal slash as operand
3252 Operands.push_back(
3253 AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3254
3255 Parser.Lex(); // Eat the slash.
3256
3257 // Zeroing or merging?
3258 auto Pred = Parser.getTok().getString().lower();
3259 if (Pred != "z" && Pred != "m") {
3260 Error(getLoc(), "expecting 'm' or 'z' predication");
3261 return MatchOperand_ParseFail;
3262 }
3263
3264 // Add zero/merge token.
3265 const char *ZM = Pred == "z" ? "z" : "m";
3266 Operands.push_back(
3267 AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3268
3269 Parser.Lex(); // Eat zero/merge token.
3270 return MatchOperand_Success;
3271}
3272
3273/// parseRegister - Parse a register operand.
3274bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3275 // Try for a Neon vector register.
3276 if (!tryParseNeonVectorRegister(Operands))
3277 return false;
3278
3279 // Otherwise try for a scalar register.
3280 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3281 return false;
3282
3283 return true;
3284}
3285
// Parse an immediate expression that may be prefixed by an ELF relocation
// specifier of the form ":spec:expr" (e.g. ":lo12:sym").  On success ImmVal
// holds the expression, wrapped in an AArch64MCExpr when a specifier was
// present.  Returns true (failure) after emitting a diagnostic.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Relocation specifiers are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Attach the specifier (when present) as an AArch64-specific target expr.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3363
// Parse a curly-braced vector list such as "{ v0.8b, v1.8b }" or the range
// form "{ v0.8b - v3.8b }".  When ExpectMatch is false, a list that doesn't
// start with a register of VectorKind is un-lexed so other list flavours
// (e.g. SVE vs. Neon) can be tried by the caller.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      // tryParseVectorRegister only succeeds for kinds parseVectorKind
      // accepts, so this path cannot be reached.
      llvm_unreachable("Expected a valid vector kind");
    }

    // NoMatch is only tolerated for the first register of the list (when the
    // caller passes NoMatchIsError == false) so backtracking remains possible.
    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{ vA.k - vB.k }".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Register numbers wrap around at 32 (e.g. "{ v31.8b - v1.8b }").
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{ vA.k, vB.k, ... }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    // A kind like ".4s" yields both an element count and width; ".s" leaves
    // NumElements at 0.
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3484
3485/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3486bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3487 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3488 if (ParseRes != MatchOperand_Success)
3489 return true;
3490
3491 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3492}
3493
3494OperandMatchResultTy
3495AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3496 SMLoc StartLoc = getLoc();
3497
3498 unsigned RegNum;
3499 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3500 if (Res != MatchOperand_Success)
3501 return Res;
3502
3503 if (!parseOptionalToken(AsmToken::Comma)) {
3504 Operands.push_back(AArch64Operand::CreateReg(
3505 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3506 return MatchOperand_Success;
3507 }
3508
3509 parseOptionalToken(AsmToken::Hash);
3510
3511 if (getParser().getTok().isNot(AsmToken::Integer)) {
3512 Error(getLoc(), "index must be absent or #0");
3513 return MatchOperand_ParseFail;
3514 }
3515
3516 const MCExpr *ImmVal;
3517 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3518 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3519 Error(getLoc(), "index must be absent or #0");
3520 return MatchOperand_ParseFail;
3521 }
3522
3523 Operands.push_back(AArch64Operand::CreateReg(
3524 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3525 return MatchOperand_Success;
3526}
3527
3528template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3529OperandMatchResultTy
3530AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3531 SMLoc StartLoc = getLoc();
3532
3533 unsigned RegNum;
3534 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3535 if (Res != MatchOperand_Success)
3536 return Res;
3537
3538 // No shift/extend is the default.
3539 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3540 Operands.push_back(AArch64Operand::CreateReg(
3541 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3542 return MatchOperand_Success;
3543 }
3544
3545 // Eat the comma
3546 getParser().Lex();
3547
3548 // Match the shift
3549 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3550 Res = tryParseOptionalShiftExtend(ExtOpnd);
3551 if (Res != MatchOperand_Success)
3552 return Res;
3553
3554 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3555 Operands.push_back(AArch64Operand::CreateReg(
3556 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3557 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3558 Ext->hasShiftExtendAmount()));
3559
3560 return MatchOperand_Success;
3561}
3562
3563bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3564 MCAsmParser &Parser = getParser();
3565
3566 // Some SVE instructions have a decoration after the immediate, i.e.
3567 // "mul vl". We parse them here and add tokens, which must be present in the
3568 // asm string in the tablegen instruction.
3569 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3570 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3571 if (!Parser.getTok().getString().equals_lower("mul") ||
3572 !(NextIsVL || NextIsHash))
3573 return true;
3574
3575 Operands.push_back(
3576 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3577 Parser.Lex(); // Eat the "mul"
3578
3579 if (NextIsVL) {
3580 Operands.push_back(
3581 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3582 Parser.Lex(); // Eat the "vl"
3583 return false;
3584 }
3585
3586 if (NextIsHash) {
3587 Parser.Lex(); // Eat the #
3588 SMLoc S = getLoc();
3589
3590 // Parse immediate operand.
3591 const MCExpr *ImmVal;
3592 if (!Parser.parseExpression(ImmVal))
3593 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3594 Operands.push_back(AArch64Operand::CreateImm(
3595 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3596 getContext()));
3597 return MatchOperand_Success;
3598 }
3599 }
3600
3601 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3602}
3603
3604/// parseOperand - Parse a arm instruction operand. For now this parses the
3605/// operand regardless of the mnemonic.
3606bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3607 bool invertCondCode) {
3608 MCAsmParser &Parser = getParser();
3609
3610 OperandMatchResultTy ResTy =
3611 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3612
3613 // Check if the current operand has a custom associated parser, if so, try to
3614 // custom parse the operand, or fallback to the general approach.
3615 if (ResTy == MatchOperand_Success)
3616 return false;
3617 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3618 // there was a match, but an error occurred, in which case, just return that
3619 // the operand parsing failed.
3620 if (ResTy == MatchOperand_ParseFail)
3621 return true;
3622
3623 // Nothing custom, so do general case parsing.
3624 SMLoc S, E;
3625 switch (getLexer().getKind()) {
3626 default: {
3627 SMLoc S = getLoc();
3628 const MCExpr *Expr;
3629 if (parseSymbolicImmVal(Expr))
3630 return Error(S, "invalid operand");
3631
3632 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3633 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3634 return false;
3635 }
3636 case AsmToken::LBrac: {
3637 SMLoc Loc = Parser.getTok().getLoc();
3638 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3639 getContext()));
3640 Parser.Lex(); // Eat '['
3641
3642 // There's no comma after a '[', so we can parse the next operand
3643 // immediately.
3644 return parseOperand(Operands, false, false);
3645 }
3646 case AsmToken::LCurly:
3647 return parseNeonVectorList(Operands);
3648 case AsmToken::Identifier: {
3649 // If we're expecting a Condition Code operand, then just parse that.
3650 if (isCondCode)
3651 return parseCondCode(Operands, invertCondCode);
3652
3653 // If it's a register name, parse it.
3654 if (!parseRegister(Operands))
3655 return false;
3656
3657 // See if this is a "mul vl" decoration or "mul #<int>" operand used
3658 // by SVE instructions.
3659 if (!parseOptionalMulOperand(Operands))
3660 return false;
3661
3662 // This could be an optional "shift" or "extend" operand.
3663 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3664 // We can only continue if no tokens were eaten.
3665 if (GotShift != MatchOperand_NoMatch)
3666 return GotShift;
3667
3668 // This was not a register so parse other operands that start with an
3669 // identifier (like labels) as expressions and create them as immediates.
3670 const MCExpr *IdVal;
3671 S = getLoc();
3672 if (getParser().parseExpression(IdVal))
3673 return true;
3674 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3675 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3676 return false;
3677 }
3678 case AsmToken::Integer:
3679 case AsmToken::Real:
3680 case AsmToken::Hash: {
3681 // #42 -> immediate.
3682 S = getLoc();
3683
3684 parseOptionalToken(AsmToken::Hash);
3685
3686 // Parse a negative sign
3687 bool isNegative = false;
3688 if (Parser.getTok().is(AsmToken::Minus)) {
3689 isNegative = true;
3690 // We need to consume this token only when we have a Real, otherwise
3691 // we let parseSymbolicImmVal take care of it
3692 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3693 Parser.Lex();
3694 }
3695
3696 // The only Real that should come through here is a literal #0.0 for
3697 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3698 // so convert the value.
3699 const AsmToken &Tok = Parser.getTok();
3700 if (Tok.is(AsmToken::Real)) {
3701 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3702 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3703 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3704 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3705 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3706 return TokError("unexpected floating point literal");
3707 else if (IntVal != 0 || isNegative)
3708 return TokError("expected floating-point constant #0.0");
3709 Parser.Lex(); // Eat the token.
3710
3711 Operands.push_back(
3712 AArch64Operand::CreateToken("#0", false, S, getContext()));
3713 Operands.push_back(
3714 AArch64Operand::CreateToken(".0", false, S, getContext()));
3715 return false;
3716 }
3717
3718 const MCExpr *ImmVal;
3719 if (parseSymbolicImmVal(ImmVal))
3720 return true;
3721
3722 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3723 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3724 return false;
3725 }
3726 case AsmToken::Equal: {
3727 SMLoc Loc = getLoc();
3728 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3729 return TokError("unexpected token in operand");
3730 Parser.Lex(); // Eat '='
3731 const MCExpr *SubExprVal;
3732 if (getParser().parseExpression(SubExprVal))
3733 return true;
3734
3735 if (Operands.size() < 2 ||
3736 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3737 return Error(Loc, "Only valid when first operand is register");
3738
3739 bool IsXReg =
3740 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3741 Operands[1]->getReg());
3742
3743 MCContext& Ctx = getContext();
3744 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3745 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3746 if (isa<MCConstantExpr>(SubExprVal)) {
3747 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3748 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3749 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3750 ShiftAmt += 16;
3751 Imm >>= 16;
3752 }
3753 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3754 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3755 Operands.push_back(AArch64Operand::CreateImm(
3756 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3757 if (ShiftAmt)
3758 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3759 ShiftAmt, true, S, E, Ctx));
3760 return false;
3761 }
3762 APInt Simm = APInt(64, Imm << ShiftAmt);
3763 // check if the immediate is an unsigned or signed 32-bit int for W regs
3764 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3765 return Error(Loc, "Immediate too large for register");
3766 }
3767 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3768 const MCExpr *CPLoc =
3769 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3770 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3771 return false;
3772 }
3773 }
3774}
3775
3776bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
3777 const MCExpr *Expr = nullptr;
3778 SMLoc L = getLoc();
3779 if (check(getParser().parseExpression(Expr), L, "expected expression"))
18
Assuming the condition is false
19
Taking false branch
3780 return true;
3781 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
20
Assuming null pointer is passed into cast
21
'Value' initialized to a null pointer value
3782 if (check(!Value, L, "expected constant expression"))
22
Assuming the condition is false
23
Taking false branch
3783 return true;
3784 Out = Value->getValue();
24
Called C++ object pointer is null
3785 return false;
3786}
3787
3788bool AArch64AsmParser::parseComma() {
3789 if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
3790 "expected comma"))
3791 return true;
3792 // Eat the comma
3793 getParser().Lex();
3794 return false;
3795}
3796
3797bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
3798 unsigned First, unsigned Last) {
3799 unsigned Reg;
3800 SMLoc Start, End;
3801 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
3802 return true;
3803
3804 // Special handling for FP and LR; they aren't linearly after x28 in
3805 // the registers enum.
3806 unsigned RangeEnd = Last;
3807 if (Base == AArch64::X0) {
3808 if (Last == AArch64::FP) {
3809 RangeEnd = AArch64::X28;
3810 if (Reg == AArch64::FP) {
3811 Out = 29;
3812 return false;
3813 }
3814 }
3815 if (Last == AArch64::LR) {
3816 RangeEnd = AArch64::X28;
3817 if (Reg == AArch64::FP) {
3818 Out = 29;
3819 return false;
3820 } else if (Reg == AArch64::LR) {
3821 Out = 30;
3822 return false;
3823 }
3824 }
3825 }
3826
3827 if (check(Reg < First || Reg > RangeEnd, Start,
3828 Twine("expected register in range ") +
3829 AArch64InstPrinter::getRegisterName(First) + " to " +
3830 AArch64InstPrinter::getRegisterName(Last)))
3831 return true;
3832 Out = Reg - Base;
3833 return false;
3834}
3835
3836bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3837 const MCParsedAsmOperand &Op2) const {
3838 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3839 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3840 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3841 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3842 return MCTargetAsmParser::regsEqual(Op1, Op2);
3843
3844 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3845, __PRETTY_FUNCTION__))
3845 "Testing equality of non-scalar registers not supported")((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3845, __PRETTY_FUNCTION__))
;
3846
3847 // Check if a registers match their sub/super register classes.
3848 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3849 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3850 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3851 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3852 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3853 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3854 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3855 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3856
3857 return false;
3858}
3859
3860/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3861/// operands.
3862bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3863 StringRef Name, SMLoc NameLoc,
3864 OperandVector &Operands) {
3865 MCAsmParser &Parser = getParser();
3866 Name = StringSwitch<StringRef>(Name.lower())
3867 .Case("beq", "b.eq")
3868 .Case("bne", "b.ne")
3869 .Case("bhs", "b.hs")
3870 .Case("bcs", "b.cs")
3871 .Case("blo", "b.lo")
3872 .Case("bcc", "b.cc")
3873 .Case("bmi", "b.mi")
3874 .Case("bpl", "b.pl")
3875 .Case("bvs", "b.vs")
3876 .Case("bvc", "b.vc")
3877 .Case("bhi", "b.hi")
3878 .Case("bls", "b.ls")
3879 .Case("bge", "b.ge")
3880 .Case("blt", "b.lt")
3881 .Case("bgt", "b.gt")
3882 .Case("ble", "b.le")
3883 .Case("bal", "b.al")
3884 .Case("bnv", "b.nv")
3885 .Default(Name);
3886
3887 // First check for the AArch64-specific .req directive.
3888 if (Parser.getTok().is(AsmToken::Identifier) &&
3889 Parser.getTok().getIdentifier().lower() == ".req") {
3890 parseDirectiveReq(Name, NameLoc);
3891 // We always return 'error' for this, as we're done with this
3892 // statement and don't need to match the 'instruction."
3893 return true;
3894 }
3895
3896 // Create the leading tokens for the mnemonic, split by '.' characters.
3897 size_t Start = 0, Next = Name.find('.');
3898 StringRef Head = Name.slice(Start, Next);
3899
3900 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
3901 // the SYS instruction.
3902 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
3903 Head == "cfp" || Head == "dvp" || Head == "cpp")
3904 return parseSysAlias(Head, NameLoc, Operands);
3905
3906 Operands.push_back(
3907 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3908 Mnemonic = Head;
3909
3910 // Handle condition codes for a branch mnemonic
3911 if (Head == "b" && Next != StringRef::npos) {
3912 Start = Next;
3913 Next = Name.find('.', Start + 1);
3914 Head = Name.slice(Start + 1, Next);
3915
3916 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3917 (Head.data() - Name.data()));
3918 AArch64CC::CondCode CC = parseCondCodeString(Head);
3919 if (CC == AArch64CC::Invalid)
3920 return Error(SuffixLoc, "invalid condition code");
3921 Operands.push_back(
3922 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3923 Operands.push_back(
3924 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3925 }
3926
3927 // Add the remaining tokens in the mnemonic.
3928 while (Next != StringRef::npos) {
3929 Start = Next;
3930 Next = Name.find('.', Start + 1);
3931 Head = Name.slice(Start, Next);
3932 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3933 (Head.data() - Name.data()) + 1);
3934 Operands.push_back(
3935 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3936 }
3937
3938 // Conditional compare instructions have a Condition Code operand, which needs
3939 // to be parsed and an immediate operand created.
3940 bool condCodeFourthOperand =
3941 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3942 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3943 Head == "csinc" || Head == "csinv" || Head == "csneg");
3944
3945 // These instructions are aliases to some of the conditional select
3946 // instructions. However, the condition code is inverted in the aliased
3947 // instruction.
3948 //
3949 // FIXME: Is this the correct way to handle these? Or should the parser
3950 // generate the aliased instructions directly?
3951 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3952 bool condCodeThirdOperand =
3953 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3954
3955 // Read the remaining operands.
3956 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3957
3958 unsigned N = 1;
3959 do {
3960 // Parse and remember the operand.
3961 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3962 (N == 3 && condCodeThirdOperand) ||
3963 (N == 2 && condCodeSecondOperand),
3964 condCodeSecondOperand || condCodeThirdOperand)) {
3965 return true;
3966 }
3967
3968 // After successfully parsing some operands there are two special cases to
3969 // consider (i.e. notional operands not separated by commas). Both are due
3970 // to memory specifiers:
3971 // + An RBrac will end an address for load/store/prefetch
3972 // + An '!' will indicate a pre-indexed operation.
3973 //
3974 // It's someone else's responsibility to make sure these tokens are sane
3975 // in the given context!
3976
3977 SMLoc RLoc = Parser.getTok().getLoc();
3978 if (parseOptionalToken(AsmToken::RBrac))
3979 Operands.push_back(
3980 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3981 SMLoc ELoc = Parser.getTok().getLoc();
3982 if (parseOptionalToken(AsmToken::Exclaim))
3983 Operands.push_back(
3984 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3985
3986 ++N;
3987 } while (parseOptionalToken(AsmToken::Comma));
3988 }
3989
3990 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3991 return true;
3992
3993 return false;
3994}
3995
3996static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3997 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31
)) ? static_cast<void> (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3997, __PRETTY_FUNCTION__))
;
3998 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3999 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4000 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4001 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4002 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4003 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4004}
4005
4006// FIXME: This entire function is a giant hack to provide us with decent
4007// operand range validation/diagnostics until TableGen/MC can be extended
4008// to support autogeneration of this kind of validation.
4009bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4010 SmallVectorImpl<SMLoc> &Loc) {
4011 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4012 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4013
4014 // A prefix only applies to the instruction following it. Here we extract
4015 // prefix information for the next instruction before validating the current
4016 // one so that in the case of failure we don't erronously continue using the
4017 // current prefix.
4018 PrefixInfo Prefix = NextPrefix;
4019 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4020
4021 // Before validating the instruction in isolation we run through the rules
4022 // applicable when it follows a prefix instruction.
4023 // NOTE: brk & hlt can be prefixed but require no additional validation.
4024 if (Prefix.isActive() &&
4025 (Inst.getOpcode() != AArch64::BRK) &&
4026 (Inst.getOpcode() != AArch64::HLT)) {
4027
4028 // Prefixed intructions must have a destructive operand.
4029 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4030 AArch64::NotDestructive)
4031 return Error(IDLoc, "instruction is unpredictable when following a"
4032 " movprfx, suggest replacing movprfx with mov");
4033
4034 // Destination operands must match.
4035 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4036 return Error(Loc[0], "instruction is unpredictable when following a"
4037 " movprfx writing to a different destination");
4038
4039 // Destination operand must not be used in any other location.
4040 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4041 if (Inst.getOperand(i).isReg() &&
4042 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4043 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4044 return Error(Loc[0], "instruction is unpredictable when following a"
4045 " movprfx and destination also used as non-destructive"
4046 " source");
4047 }
4048
4049 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4050 if (Prefix.isPredicated()) {
4051 int PgIdx = -1;
4052
4053 // Find the instructions general predicate.
4054 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4055 if (Inst.getOperand(i).isReg() &&
4056 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4057 PgIdx = i;
4058 break;
4059 }
4060
4061 // Instruction must be predicated if the movprfx is predicated.
4062 if (PgIdx == -1 ||
4063 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4064 return Error(IDLoc, "instruction is unpredictable when following a"
4065 " predicated movprfx, suggest using unpredicated movprfx");
4066
4067 // Instruction must use same general predicate as the movprfx.
4068 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4069 return Error(IDLoc, "instruction is unpredictable when following a"
4070 " predicated movprfx using a different general predicate");
4071
4072 // Instruction element type must match the movprfx.
4073 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4074 return Error(IDLoc, "instruction is unpredictable when following a"
4075 " predicated movprfx with a different element size");
4076 }
4077 }
4078
4079 // Check for indexed addressing modes w/ the base register being the
4080 // same as a destination/source register or pair load where
4081 // the Rt == Rt2. All of those are undefined behaviour.
4082 switch (Inst.getOpcode()) {
4083 case AArch64::LDPSWpre:
4084 case AArch64::LDPWpost:
4085 case AArch64::LDPWpre:
4086 case AArch64::LDPXpost:
4087 case AArch64::LDPXpre: {
4088 unsigned Rt = Inst.getOperand(1).getReg();
4089 unsigned Rt2 = Inst.getOperand(2).getReg();
4090 unsigned Rn = Inst.getOperand(3).getReg();
4091 if (RI->isSubRegisterEq(Rn, Rt))
4092 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4093 "is also a destination");
4094 if (RI->isSubRegisterEq(Rn, Rt2))
4095 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4096 "is also a destination");
4097 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4098 }
4099 case AArch64::LDPDi:
4100 case AArch64::LDPQi:
4101 case AArch64::LDPSi:
4102 case AArch64::LDPSWi:
4103 case AArch64::LDPWi:
4104 case AArch64::LDPXi: {
4105 unsigned Rt = Inst.getOperand(0).getReg();
4106 unsigned Rt2 = Inst.getOperand(1).getReg();
4107 if (Rt == Rt2)
4108 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4109 break;
4110 }
4111 case AArch64::LDPDpost:
4112 case AArch64::LDPDpre:
4113 case AArch64::LDPQpost:
4114 case AArch64::LDPQpre:
4115 case AArch64::LDPSpost:
4116 case AArch64::LDPSpre:
4117 case AArch64::LDPSWpost: {
4118 unsigned Rt = Inst.getOperand(1).getReg();
4119 unsigned Rt2 = Inst.getOperand(2).getReg();
4120 if (Rt == Rt2)
4121 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4122 break;
4123 }
4124 case AArch64::STPDpost:
4125 case AArch64::STPDpre:
4126 case AArch64::STPQpost:
4127 case AArch64::STPQpre:
4128 case AArch64::STPSpost:
4129 case AArch64::STPSpre:
4130 case AArch64::STPWpost:
4131 case AArch64::STPWpre:
4132 case AArch64::STPXpost:
4133 case AArch64::STPXpre: {
4134 unsigned Rt = Inst.getOperand(1).getReg();
4135 unsigned Rt2 = Inst.getOperand(2).getReg();
4136 unsigned Rn = Inst.getOperand(3).getReg();
4137 if (RI->isSubRegisterEq(Rn, Rt))
4138 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4139 "is also a source");
4140 if (RI->isSubRegisterEq(Rn, Rt2))
4141 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4142 "is also a source");
4143 break;
4144 }
4145 case AArch64::LDRBBpre:
4146 case AArch64::LDRBpre:
4147 case AArch64::LDRHHpre:
4148 case AArch64::LDRHpre:
4149 case AArch64::LDRSBWpre:
4150 case AArch64::LDRSBXpre:
4151 case AArch64::LDRSHWpre:
4152 case AArch64::LDRSHXpre:
4153 case AArch64::LDRSWpre:
4154 case AArch64::LDRWpre:
4155 case AArch64::LDRXpre:
4156 case AArch64::LDRBBpost:
4157 case AArch64::LDRBpost:
4158 case AArch64::LDRHHpost:
4159 case AArch64::LDRHpost:
4160 case AArch64::LDRSBWpost:
4161 case AArch64::LDRSBXpost:
4162 case AArch64::LDRSHWpost:
4163 case AArch64::LDRSHXpost:
4164 case AArch64::LDRSWpost:
4165 case AArch64::LDRWpost:
4166 case AArch64::LDRXpost: {
4167 unsigned Rt = Inst.getOperand(1).getReg();
4168 unsigned Rn = Inst.getOperand(2).getReg();
4169 if (RI->isSubRegisterEq(Rn, Rt))
4170 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4171 "is also a source");
4172 break;
4173 }
4174 case AArch64::STRBBpost:
4175 case AArch64::STRBpost:
4176 case AArch64::STRHHpost:
4177 case AArch64::STRHpost:
4178 case AArch64::STRWpost:
4179 case AArch64::STRXpost:
4180 case AArch64::STRBBpre:
4181 case AArch64::STRBpre:
4182 case AArch64::STRHHpre:
4183 case AArch64::STRHpre:
4184 case AArch64::STRWpre:
4185 case AArch64::STRXpre: {
4186 unsigned Rt = Inst.getOperand(1).getReg();
4187 unsigned Rn = Inst.getOperand(2).getReg();
4188 if (RI->isSubRegisterEq(Rn, Rt))
4189 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4190 "is also a source");
4191 break;
4192 }
4193 case AArch64::STXRB:
4194 case AArch64::STXRH:
4195 case AArch64::STXRW:
4196 case AArch64::STXRX:
4197 case AArch64::STLXRB:
4198 case AArch64::STLXRH:
4199 case AArch64::STLXRW:
4200 case AArch64::STLXRX: {
4201 unsigned Rs = Inst.getOperand(0).getReg();
4202 unsigned Rt = Inst.getOperand(1).getReg();
4203 unsigned Rn = Inst.getOperand(2).getReg();
4204 if (RI->isSubRegisterEq(Rt, Rs) ||
4205 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4206 return Error(Loc[0],
4207 "unpredictable STXR instruction, status is also a source");
4208 break;
4209 }
4210 case AArch64::STXPW:
4211 case AArch64::STXPX:
4212 case AArch64::STLXPW:
4213 case AArch64::STLXPX: {
4214 unsigned Rs = Inst.getOperand(0).getReg();
4215 unsigned Rt1 = Inst.getOperand(1).getReg();
4216 unsigned Rt2 = Inst.getOperand(2).getReg();
4217 unsigned Rn = Inst.getOperand(3).getReg();
4218 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4219 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4220 return Error(Loc[0],
4221 "unpredictable STXP instruction, status is also a source");
4222 break;
4223 }
4224 case AArch64::LDRABwriteback:
4225 case AArch64::LDRAAwriteback: {
4226 unsigned Xt = Inst.getOperand(0).getReg();
4227 unsigned Xn = Inst.getOperand(1).getReg();
4228 if (Xt == Xn)
4229 return Error(Loc[0],
4230 "unpredictable LDRA instruction, writeback base"
4231 " is also a destination");
4232 break;
4233 }
4234 }
4235
4236
4237 // Now check immediate ranges. Separate from the above as there is overlap
4238 // in the instructions being checked and this keeps the nested conditionals
4239 // to a minimum.
4240 switch (Inst.getOpcode()) {
4241 case AArch64::ADDSWri:
4242 case AArch64::ADDSXri:
4243 case AArch64::ADDWri:
4244 case AArch64::ADDXri:
4245 case AArch64::SUBSWri:
4246 case AArch64::SUBSXri:
4247 case AArch64::SUBWri:
4248 case AArch64::SUBXri: {
4249 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4250 // some slight duplication here.
4251 if (Inst.getOperand(2).isExpr()) {
4252 const MCExpr *Expr = Inst.getOperand(2).getExpr();
4253 AArch64MCExpr::VariantKind ELFRefKind;
4254 MCSymbolRefExpr::VariantKind DarwinRefKind;
4255 int64_t Addend;
4256 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4257
4258 // Only allow these with ADDXri.
4259 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4260 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4261 Inst.getOpcode() == AArch64::ADDXri)
4262 return false;
4263
4264 // Only allow these with ADDXri/ADDWri
4265 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4266 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4267 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4268 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4269 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4270 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4271 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4272 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4273 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4274 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4275 (Inst.getOpcode() == AArch64::ADDXri ||
4276 Inst.getOpcode() == AArch64::ADDWri))
4277 return false;
4278
4279 // Don't allow symbol refs in the immediate field otherwise
4280 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4281 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4282 // 'cmp w0, 'borked')
4283 return Error(Loc.back(), "invalid immediate expression");
4284 }
4285 // We don't validate more complex expressions here
4286 }
4287 return false;
4288 }
4289 default:
4290 return false;
4291 }
4292}
4293
4294static std::string AArch64MnemonicSpellCheck(StringRef S,
4295 const FeatureBitset &FBS,
4296 unsigned VariantID = 0);
4297
4298bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4299 uint64_t ErrorInfo,
4300 OperandVector &Operands) {
4301 switch (ErrCode) {
4302 case Match_InvalidTiedOperand: {
4303 RegConstraintEqualityTy EqTy =
4304 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4305 .getRegEqualityTy();
4306 switch (EqTy) {
4307 case RegConstraintEqualityTy::EqualsSubReg:
4308 return Error(Loc, "operand must be 64-bit form of destination register");
4309 case RegConstraintEqualityTy::EqualsSuperReg:
4310 return Error(Loc, "operand must be 32-bit form of destination register");
4311 case RegConstraintEqualityTy::EqualsReg:
4312 return Error(Loc, "operand must match destination register");
4313 }
4314 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4314)
;
4315 }
4316 case Match_MissingFeature:
4317 return Error(Loc,
4318 "instruction requires a CPU feature not currently enabled");
4319 case Match_InvalidOperand:
4320 return Error(Loc, "invalid operand for instruction");
4321 case Match_InvalidSuffix:
4322 return Error(Loc, "invalid type suffix for instruction");
4323 case Match_InvalidCondCode:
4324 return Error(Loc, "expected AArch64 condition code");
4325 case Match_AddSubRegExtendSmall:
4326 return Error(Loc,
4327 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4328 case Match_AddSubRegExtendLarge:
4329 return Error(Loc,
4330 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4331 case Match_AddSubSecondSource:
4332 return Error(Loc,
4333 "expected compatible register, symbol or integer in range [0, 4095]");
4334 case Match_LogicalSecondSource:
4335 return Error(Loc, "expected compatible register or logical immediate");
4336 case Match_InvalidMovImm32Shift:
4337 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4338 case Match_InvalidMovImm64Shift:
4339 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4340 case Match_AddSubRegShift32:
4341 return Error(Loc,
4342 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4343 case Match_AddSubRegShift64:
4344 return Error(Loc,
4345 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4346 case Match_InvalidFPImm:
4347 return Error(Loc,
4348 "expected compatible register or floating-point constant");
4349 case Match_InvalidMemoryIndexedSImm6:
4350 return Error(Loc, "index must be an integer in range [-32, 31].");
4351 case Match_InvalidMemoryIndexedSImm5:
4352 return Error(Loc, "index must be an integer in range [-16, 15].");
4353 case Match_InvalidMemoryIndexed1SImm4:
4354 return Error(Loc, "index must be an integer in range [-8, 7].");
4355 case Match_InvalidMemoryIndexed2SImm4:
4356 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4357 case Match_InvalidMemoryIndexed3SImm4:
4358 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4359 case Match_InvalidMemoryIndexed4SImm4:
4360 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4361 case Match_InvalidMemoryIndexed16SImm4:
4362 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4363 case Match_InvalidMemoryIndexed32SImm4:
4364 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4365 case Match_InvalidMemoryIndexed1SImm6:
4366 return Error(Loc, "index must be an integer in range [-32, 31].");
4367 case Match_InvalidMemoryIndexedSImm8:
4368 return Error(Loc, "index must be an integer in range [-128, 127].");
4369 case Match_InvalidMemoryIndexedSImm9:
4370 return Error(Loc, "index must be an integer in range [-256, 255].");
4371 case Match_InvalidMemoryIndexed16SImm9:
4372 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4373 case Match_InvalidMemoryIndexed8SImm10:
4374 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4375 case Match_InvalidMemoryIndexed4SImm7:
4376 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4377 case Match_InvalidMemoryIndexed8SImm7:
4378 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4379 case Match_InvalidMemoryIndexed16SImm7:
4380 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4381 case Match_InvalidMemoryIndexed8UImm5:
4382 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4383 case Match_InvalidMemoryIndexed4UImm5:
4384 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4385 case Match_InvalidMemoryIndexed2UImm5:
4386 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4387 case Match_InvalidMemoryIndexed8UImm6:
4388 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4389 case Match_InvalidMemoryIndexed16UImm6:
4390 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4391 case Match_InvalidMemoryIndexed4UImm6:
4392 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4393 case Match_InvalidMemoryIndexed2UImm6:
4394 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4395 case Match_InvalidMemoryIndexed1UImm6:
4396 return Error(Loc, "index must be in range [0, 63].");
4397 case Match_InvalidMemoryWExtend8:
4398 return Error(Loc,
4399 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4400 case Match_InvalidMemoryWExtend16:
4401 return Error(Loc,
4402 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4403 case Match_InvalidMemoryWExtend32:
4404 return Error(Loc,
4405 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4406 case Match_InvalidMemoryWExtend64:
4407 return Error(Loc,
4408 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4409 case Match_InvalidMemoryWExtend128:
4410 return Error(Loc,
4411 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4412 case Match_InvalidMemoryXExtend8:
4413 return Error(Loc,
4414 "expected 'lsl' or 'sxtx' with optional shift of #0");
4415 case Match_InvalidMemoryXExtend16:
4416 return Error(Loc,
4417 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4418 case Match_InvalidMemoryXExtend32:
4419 return Error(Loc,
4420 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4421 case Match_InvalidMemoryXExtend64:
4422 return Error(Loc,
4423 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4424 case Match_InvalidMemoryXExtend128:
4425 return Error(Loc,
4426 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4427 case Match_InvalidMemoryIndexed1:
4428 return Error(Loc, "index must be an integer in range [0, 4095].");
4429 case Match_InvalidMemoryIndexed2:
4430 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4431 case Match_InvalidMemoryIndexed4:
4432 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4433 case Match_InvalidMemoryIndexed8:
4434 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4435 case Match_InvalidMemoryIndexed16:
4436 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4437 case Match_InvalidImm0_1:
4438 return Error(Loc, "immediate must be an integer in range [0, 1].");
4439 case Match_InvalidImm0_7:
4440 return Error(Loc, "immediate must be an integer in range [0, 7].");
4441 case Match_InvalidImm0_15:
4442 return Error(Loc, "immediate must be an integer in range [0, 15].");
4443 case Match_InvalidImm0_31:
4444 return Error(Loc, "immediate must be an integer in range [0, 31].");
4445 case Match_InvalidImm0_63:
4446 return Error(Loc, "immediate must be an integer in range [0, 63].");
4447 case Match_InvalidImm0_127:
4448 return Error(Loc, "immediate must be an integer in range [0, 127].");
4449 case Match_InvalidImm0_255:
4450 return Error(Loc, "immediate must be an integer in range [0, 255].");
4451 case Match_InvalidImm0_65535:
4452 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4453 case Match_InvalidImm1_8:
4454 return Error(Loc, "immediate must be an integer in range [1, 8].");
4455 case Match_InvalidImm1_16:
4456 return Error(Loc, "immediate must be an integer in range [1, 16].");
4457 case Match_InvalidImm1_32:
4458 return Error(Loc, "immediate must be an integer in range [1, 32].");
4459 case Match_InvalidImm1_64:
4460 return Error(Loc, "immediate must be an integer in range [1, 64].");
4461 case Match_InvalidSVEAddSubImm8:
4462 return Error(Loc, "immediate must be an integer in range [0, 255]"
4463 " with a shift amount of 0");
4464 case Match_InvalidSVEAddSubImm16:
4465 case Match_InvalidSVEAddSubImm32:
4466 case Match_InvalidSVEAddSubImm64:
4467 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4468 "multiple of 256 in range [256, 65280]");
4469 case Match_InvalidSVECpyImm8:
4470 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4471 " with a shift amount of 0");
4472 case Match_InvalidSVECpyImm16:
4473 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4474 "multiple of 256 in range [-32768, 65280]");
4475 case Match_InvalidSVECpyImm32:
4476 case Match_InvalidSVECpyImm64:
4477 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4478 "multiple of 256 in range [-32768, 32512]");
4479 case Match_InvalidIndexRange1_1:
4480 return Error(Loc, "expected lane specifier '[1]'");
4481 case Match_InvalidIndexRange0_15:
4482 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4483 case Match_InvalidIndexRange0_7:
4484 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4485 case Match_InvalidIndexRange0_3:
4486 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4487 case Match_InvalidIndexRange0_1:
4488 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4489 case Match_InvalidSVEIndexRange0_63:
4490 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4491 case Match_InvalidSVEIndexRange0_31:
4492 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4493 case Match_InvalidSVEIndexRange0_15:
4494 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4495 case Match_InvalidSVEIndexRange0_7:
4496 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4497 case Match_InvalidSVEIndexRange0_3:
4498 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4499 case Match_InvalidLabel:
4500 return Error(Loc, "expected label or encodable integer pc offset");
4501 case Match_MRS:
4502 return Error(Loc, "expected readable system register");
4503 case Match_MSR:
4504 return Error(Loc, "expected writable system register or pstate");
4505 case Match_InvalidComplexRotationEven:
4506 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4507 case Match_InvalidComplexRotationOdd:
4508 return Error(Loc, "complex rotation must be 90 or 270.");
4509 case Match_MnemonicFail: {
4510 std::string Suggestion = AArch64MnemonicSpellCheck(
4511 ((AArch64Operand &)*Operands[0]).getToken(),
4512 ComputeAvailableFeatures(STI->getFeatureBits()));
4513 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4514 }
4515 case Match_InvalidGPR64shifted8:
4516 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4517 case Match_InvalidGPR64shifted16:
4518 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4519 case Match_InvalidGPR64shifted32:
4520 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4521 case Match_InvalidGPR64shifted64:
4522 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4523 case Match_InvalidGPR64NoXZRshifted8:
4524 return Error(Loc, "register must be x0..x30 without shift");
4525 case Match_InvalidGPR64NoXZRshifted16:
4526 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4527 case Match_InvalidGPR64NoXZRshifted32:
4528 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4529 case Match_InvalidGPR64NoXZRshifted64:
4530 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4531 case Match_InvalidZPR32UXTW8:
4532 case Match_InvalidZPR32SXTW8:
4533 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4534 case Match_InvalidZPR32UXTW16:
4535 case Match_InvalidZPR32SXTW16:
4536 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4537 case Match_InvalidZPR32UXTW32:
4538 case Match_InvalidZPR32SXTW32:
4539 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4540 case Match_InvalidZPR32UXTW64:
4541 case Match_InvalidZPR32SXTW64:
4542 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4543 case Match_InvalidZPR64UXTW8:
4544 case Match_InvalidZPR64SXTW8:
4545 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4546 case Match_InvalidZPR64UXTW16:
4547 case Match_InvalidZPR64SXTW16:
4548 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4549 case Match_InvalidZPR64UXTW32:
4550 case Match_InvalidZPR64SXTW32:
4551 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4552 case Match_InvalidZPR64UXTW64:
4553 case Match_InvalidZPR64SXTW64:
4554 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4555 case Match_InvalidZPR32LSL8:
4556 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4557 case Match_InvalidZPR32LSL16:
4558 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4559 case Match_InvalidZPR32LSL32:
4560 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4561 case Match_InvalidZPR32LSL64:
4562 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4563 case Match_InvalidZPR64LSL8:
4564 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4565 case Match_InvalidZPR64LSL16:
4566 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4567 case Match_InvalidZPR64LSL32:
4568 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4569 case Match_InvalidZPR64LSL64:
4570 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4571 case Match_InvalidZPR0:
4572 return Error(Loc, "expected register without element width suffix");
4573 case Match_InvalidZPR8:
4574 case Match_InvalidZPR16:
4575 case Match_InvalidZPR32:
4576 case Match_InvalidZPR64:
4577 case Match_InvalidZPR128:
4578 return Error(Loc, "invalid element width");
4579 case Match_InvalidZPR_3b8:
4580 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4581 case Match_InvalidZPR_3b16:
4582 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4583 case Match_InvalidZPR_3b32:
4584 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4585 case Match_InvalidZPR_4b16:
4586 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4587 case Match_InvalidZPR_4b32:
4588 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4589 case Match_InvalidZPR_4b64:
4590 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4591 case Match_InvalidSVEPattern:
4592 return Error(Loc, "invalid predicate pattern");
4593 case Match_InvalidSVEPredicateAnyReg:
4594 case Match_InvalidSVEPredicateBReg:
4595 case Match_InvalidSVEPredicateHReg:
4596 case Match_InvalidSVEPredicateSReg:
4597 case Match_InvalidSVEPredicateDReg:
4598 return Error(Loc, "invalid predicate register.");
4599 case Match_InvalidSVEPredicate3bAnyReg:
4600 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
4601 case Match_InvalidSVEPredicate3bBReg:
4602 return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
4603 case Match_InvalidSVEPredicate3bHReg:
4604 return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
4605 case Match_InvalidSVEPredicate3bSReg:
4606 return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
4607 case Match_InvalidSVEPredicate3bDReg:
4608 return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
4609 case Match_InvalidSVEExactFPImmOperandHalfOne:
4610 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4611 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4612 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4613 case Match_InvalidSVEExactFPImmOperandZeroOne:
4614 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4615 default:
4616 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4616)
;
4617 }
4618}
4619
4620static const char *getSubtargetFeatureName(uint64_t Val);
4621
4622bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4623 OperandVector &Operands,
4624 MCStreamer &Out,
4625 uint64_t &ErrorInfo,
4626 bool MatchingInlineAsm) {
4627 assert(!Operands.empty() && "Unexpect empty operand list!")((!Operands.empty() && "Unexpect empty operand list!"
) ? static_cast<void> (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4627, __PRETTY_FUNCTION__))
;
4628 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4629 assert(Op.isToken() && "Leading operand should always be a mnemonic!")((Op.isToken() && "Leading operand should always be a mnemonic!"
) ? static_cast<void> (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4629, __PRETTY_FUNCTION__))
;
4630
4631 StringRef Tok = Op.getToken();
4632 unsigned NumOperands = Operands.size();
4633
4634 if (NumOperands == 4 && Tok == "lsl") {
4635 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4636 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4637 if (Op2.isScalarReg() && Op3.isImm()) {
4638 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4639 if (Op3CE) {
4640 uint64_t Op3Val = Op3CE->getValue();
4641 uint64_t NewOp3Val = 0;
4642 uint64_t NewOp4Val = 0;
4643 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4644 Op2.getReg())) {
4645 NewOp3Val = (32 - Op3Val) & 0x1f;
4646 NewOp4Val = 31 - Op3Val;
4647 } else {
4648 NewOp3Val = (64 - Op3Val) & 0x3f;
4649 NewOp4Val = 63 - Op3Val;
4650 }
4651
4652 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4653 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4654
4655 Operands[0] = AArch64Operand::CreateToken(
4656 "ubfm", false, Op.getStartLoc(), getContext());
4657 Operands.push_back(AArch64Operand::CreateImm(
4658 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4659 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4660 Op3.getEndLoc(), getContext());
4661 }
4662 }
4663 } else if (NumOperands == 4 && Tok == "bfc") {
4664 // FIXME: Horrible hack to handle BFC->BFM alias.
4665 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4666 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4667 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4668
4669 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4670 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4671 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4672
4673 if (LSBCE && WidthCE) {
4674 uint64_t LSB = LSBCE->getValue();
4675 uint64_t Width = WidthCE->getValue();
4676
4677 uint64_t RegWidth = 0;
4678 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4679 Op1.getReg()))
4680 RegWidth = 64;
4681 else
4682 RegWidth = 32;
4683
4684 if (LSB >= RegWidth)
4685 return Error(LSBOp.getStartLoc(),
4686 "expected integer in range [0, 31]");
4687 if (Width < 1 || Width > RegWidth)
4688 return Error(WidthOp.getStartLoc(),
4689 "expected integer in range [1, 32]");
4690
4691 uint64_t ImmR = 0;
4692 if (RegWidth == 32)
4693 ImmR = (32 - LSB) & 0x1f;
4694 else
4695 ImmR = (64 - LSB) & 0x3f;
4696
4697 uint64_t ImmS = Width - 1;
4698
4699 if (ImmR != 0 && ImmS >= ImmR)
4700 return Error(WidthOp.getStartLoc(),
4701 "requested insert overflows register");
4702
4703 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4704 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4705 Operands[0] = AArch64Operand::CreateToken(
4706 "bfm", false, Op.getStartLoc(), getContext());
4707 Operands[2] = AArch64Operand::CreateReg(
4708 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4709 SMLoc(), SMLoc(), getContext());
4710 Operands[3] = AArch64Operand::CreateImm(
4711 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4712 Operands.emplace_back(
4713 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4714 WidthOp.getEndLoc(), getContext()));
4715 }
4716 }
4717 } else if (NumOperands == 5) {
4718 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4719 // UBFIZ -> UBFM aliases.
4720 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4721 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4722 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4723 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4724
4725 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4726 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4727 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4728
4729 if (Op3CE && Op4CE) {
4730 uint64_t Op3Val = Op3CE->getValue();
4731 uint64_t Op4Val = Op4CE->getValue();
4732
4733 uint64_t RegWidth = 0;
4734 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4735 Op1.getReg()))
4736 RegWidth = 64;
4737 else
4738 RegWidth = 32;
4739
4740 if (Op3Val >= RegWidth)
4741 return Error(Op3.getStartLoc(),
4742 "expected integer in range [0, 31]");
4743 if (Op4Val < 1 || Op4Val > RegWidth)
4744 return Error(Op4.getStartLoc(),
4745 "expected integer in range [1, 32]");
4746
4747 uint64_t NewOp3Val = 0;
4748 if (RegWidth == 32)
4749 NewOp3Val = (32 - Op3Val) & 0x1f;
4750 else
4751 NewOp3Val = (64 - Op3Val) & 0x3f;
4752
4753 uint64_t NewOp4Val = Op4Val - 1;
4754
4755 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4756 return Error(Op4.getStartLoc(),
4757 "requested insert overflows register");
4758
4759 const MCExpr *NewOp3 =
4760 MCConstantExpr::create(NewOp3Val, getContext());
4761 const MCExpr *NewOp4 =
4762 MCConstantExpr::create(NewOp4Val, getContext());
4763 Operands[3] = AArch64Operand::CreateImm(
4764 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4765 Operands[4] = AArch64Operand::CreateImm(
4766 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4767 if (Tok == "bfi")
4768 Operands[0] = AArch64Operand::CreateToken(
4769 "bfm", false, Op.getStartLoc(), getContext());
4770 else if (Tok == "sbfiz")
4771 Operands[0] = AArch64Operand::CreateToken(
4772 "sbfm", false, Op.getStartLoc(), getContext());
4773 else if (Tok == "ubfiz")
4774 Operands[0] = AArch64Operand::CreateToken(
4775 "ubfm", false, Op.getStartLoc(), getContext());
4776 else
4777 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4777)
;
4778 }
4779 }
4780
4781 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4782 // UBFX -> UBFM aliases.
4783 } else if (NumOperands == 5 &&
4784 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4785 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4786 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4787 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4788
4789 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4790 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4791 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4792
4793 if (Op3CE && Op4CE) {
4794 uint64_t Op3Val = Op3CE->getValue();
4795 uint64_t Op4Val = Op4CE->getValue();
4796
4797 uint64_t RegWidth = 0;
4798 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4799 Op1.getReg()))
4800 RegWidth = 64;
4801 else
4802 RegWidth = 32;
4803
4804 if (Op3Val >= RegWidth)
4805 return Error(Op3.getStartLoc(),
4806 "expected integer in range [0, 31]");
4807 if (Op4Val < 1 || Op4Val > RegWidth)
4808 return Error(Op4.getStartLoc(),
4809 "expected integer in range [1, 32]");
4810
4811 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4812
4813 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4814 return Error(Op4.getStartLoc(),
4815 "requested extract overflows register");
4816
4817 const MCExpr *NewOp4 =
4818 MCConstantExpr::create(NewOp4Val, getContext());
4819 Operands[4] = AArch64Operand::CreateImm(
4820 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4821 if (Tok == "bfxil")
4822 Operands[0] = AArch64Operand::CreateToken(
4823 "bfm", false, Op.getStartLoc(), getContext());
4824 else if (Tok == "sbfx")
4825 Operands[0] = AArch64Operand::CreateToken(
4826 "sbfm", false, Op.getStartLoc(), getContext());
4827 else if (Tok == "ubfx")
4828 Operands[0] = AArch64Operand::CreateToken(
4829 "ubfm", false, Op.getStartLoc(), getContext());
4830 else
4831 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4831)
;
4832 }
4833 }
4834 }
4835 }
4836
4837 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4838 // instruction for FP registers correctly in some rare circumstances. Convert
4839 // it to a safe instruction and warn (because silently changing someone's
4840 // assembly is rude).
4841 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4842 NumOperands == 4 && Tok == "movi") {
4843 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4844 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4845 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4846 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4847 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4848 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4849 if (Suffix.lower() == ".2d" &&
4850 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4851 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4852 " correctly on this CPU, converting to equivalent movi.16b");
4853 // Switch the suffix to .16b.
4854 unsigned Idx = Op1.isToken() ? 1 : 2;
4855 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4856 getContext());
4857 }
4858 }
4859 }
4860
4861 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4862 // InstAlias can't quite handle this since the reg classes aren't
4863 // subclasses.
4864 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4865 // The source register can be Wn here, but the matcher expects a
4866 // GPR64. Twiddle it here if necessary.
4867 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4868 if (Op.isScalarReg()) {
4869 unsigned Reg = getXRegFromWReg(Op.getReg());
4870 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4871 Op.getStartLoc(), Op.getEndLoc(),
4872 getContext());
4873 }
4874 }
4875 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4876 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4877 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4878 if (Op.isScalarReg() &&
4879 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4880 Op.getReg())) {
4881 // The source register can be Wn here, but the matcher expects a
4882 // GPR64. Twiddle it here if necessary.
4883 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4884 if (Op.isScalarReg()) {
4885 unsigned Reg = getXRegFromWReg(Op.getReg());
4886 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4887 Op.getStartLoc(),
4888 Op.getEndLoc(), getContext());
4889 }
4890 }
4891 }
4892 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4893 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4894 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4895 if (Op.isScalarReg() &&
4896 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4897 Op.getReg())) {
4898 // The source register can be Wn here, but the matcher expects a
4899 // GPR32. Twiddle it here if necessary.
4900 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4901 if (Op.isScalarReg()) {
4902 unsigned Reg = getWRegFromXReg(Op.getReg());
4903 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4904 Op.getStartLoc(),
4905 Op.getEndLoc(), getContext());
4906 }
4907 }
4908 }
4909
4910 MCInst Inst;
4911 FeatureBitset MissingFeatures;
4912 // First try to match against the secondary set of tables containing the
4913 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4914 unsigned MatchResult =
4915 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4916 MatchingInlineAsm, 1);
4917
4918 // If that fails, try against the alternate table containing long-form NEON:
4919 // "fadd v0.2s, v1.2s, v2.2s"
4920 if (MatchResult != Match_Success) {
4921 // But first, save the short-form match result: we can use it in case the
4922 // long-form match also fails.
4923 auto ShortFormNEONErrorInfo = ErrorInfo;
4924 auto ShortFormNEONMatchResult = MatchResult;
4925 auto ShortFormNEONMissingFeatures = MissingFeatures;
4926
4927 MatchResult =
4928 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4929 MatchingInlineAsm, 0);
4930
4931 // Now, both matches failed, and the long-form match failed on the mnemonic
4932 // suffix token operand. The short-form match failure is probably more
4933 // relevant: use it instead.
4934 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4935 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4936 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4937 MatchResult = ShortFormNEONMatchResult;
4938 ErrorInfo = ShortFormNEONErrorInfo;
4939 MissingFeatures = ShortFormNEONMissingFeatures;
4940 }
4941 }
4942
4943 switch (MatchResult) {
4944 case Match_Success: {
4945 // Perform range checking and other semantic validations
4946 SmallVector<SMLoc, 8> OperandLocs;
4947 NumOperands = Operands.size();
4948 for (unsigned i = 1; i < NumOperands; ++i)
4949 OperandLocs.push_back(Operands[i]->getStartLoc());
4950 if (validateInstruction(Inst, IDLoc, OperandLocs))
4951 return true;
4952
4953 Inst.setLoc(IDLoc);
4954 Out.emitInstruction(Inst, getSTI());
4955 return false;
4956 }
4957 case Match_MissingFeature: {
4958 assert(MissingFeatures.any() && "Unknown missing feature!")((MissingFeatures.any() && "Unknown missing feature!"
) ? static_cast<void> (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4958, __PRETTY_FUNCTION__))
;
4959 // Special case the error message for the very common case where only
4960 // a single subtarget feature is missing (neon, e.g.).
4961 std::string Msg = "instruction requires:";
4962 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4963 if (MissingFeatures[i]) {
4964 Msg += " ";
4965 Msg += getSubtargetFeatureName(i);
4966 }
4967 }
4968 return Error(IDLoc, Msg);
4969 }
4970 case Match_MnemonicFail:
4971 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4972 case Match_InvalidOperand: {
4973 SMLoc ErrorLoc = IDLoc;
4974
4975 if (ErrorInfo != ~0ULL) {
4976 if (ErrorInfo >= Operands.size())
4977 return Error(IDLoc, "too few operands for instruction",
4978 SMRange(IDLoc, getTok().getLoc()));
4979
4980 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4981 if (ErrorLoc == SMLoc())
4982 ErrorLoc = IDLoc;
4983 }
4984 // If the match failed on a suffix token operand, tweak the diagnostic
4985 // accordingly.
4986 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4987 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4988 MatchResult = Match_InvalidSuffix;
4989
4990 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4991 }
4992 case Match_InvalidTiedOperand:
4993 case Match_InvalidMemoryIndexed1:
4994 case Match_InvalidMemoryIndexed2:
4995 case Match_InvalidMemoryIndexed4:
4996 case Match_InvalidMemoryIndexed8:
4997 case Match_InvalidMemoryIndexed16:
4998 case Match_InvalidCondCode:
4999 case Match_AddSubRegExtendSmall:
5000 case Match_AddSubRegExtendLarge:
5001 case Match_AddSubSecondSource:
5002 case Match_LogicalSecondSource:
5003 case Match_AddSubRegShift32:
5004 case Match_AddSubRegShift64:
5005 case Match_InvalidMovImm32Shift:
5006 case Match_InvalidMovImm64Shift:
5007 case Match_InvalidFPImm:
5008 case Match_InvalidMemoryWExtend8:
5009 case Match_InvalidMemoryWExtend16:
5010 case Match_InvalidMemoryWExtend32:
5011 case Match_InvalidMemoryWExtend64:
5012 case Match_InvalidMemoryWExtend128:
5013 case Match_InvalidMemoryXExtend8:
5014 case Match_InvalidMemoryXExtend16:
5015 case Match_InvalidMemoryXExtend32:
5016 case Match_InvalidMemoryXExtend64:
5017 case Match_InvalidMemoryXExtend128:
5018 case Match_InvalidMemoryIndexed1SImm4:
5019 case Match_InvalidMemoryIndexed2SImm4:
5020 case Match_InvalidMemoryIndexed3SImm4:
5021 case Match_InvalidMemoryIndexed4SImm4:
5022 case Match_InvalidMemoryIndexed1SImm6:
5023 case Match_InvalidMemoryIndexed16SImm4:
5024 case Match_InvalidMemoryIndexed32SImm4:
5025 case Match_InvalidMemoryIndexed4SImm7:
5026 case Match_InvalidMemoryIndexed8SImm7:
5027 case Match_InvalidMemoryIndexed16SImm7:
5028 case Match_InvalidMemoryIndexed8UImm5:
5029 case Match_InvalidMemoryIndexed4UImm5:
5030 case Match_InvalidMemoryIndexed2UImm5:
5031 case Match_InvalidMemoryIndexed1UImm6:
5032 case Match_InvalidMemoryIndexed2UImm6:
5033 case Match_InvalidMemoryIndexed4UImm6:
5034 case Match_InvalidMemoryIndexed8UImm6:
5035 case Match_InvalidMemoryIndexed16UImm6:
5036 case Match_InvalidMemoryIndexedSImm6:
5037 case Match_InvalidMemoryIndexedSImm5:
5038 case Match_InvalidMemoryIndexedSImm8:
5039 case Match_InvalidMemoryIndexedSImm9:
5040 case Match_InvalidMemoryIndexed16SImm9:
5041 case Match_InvalidMemoryIndexed8SImm10:
5042 case Match_InvalidImm0_1:
5043 case Match_InvalidImm0_7:
5044 case Match_InvalidImm0_15:
5045 case Match_InvalidImm0_31:
5046 case Match_InvalidImm0_63:
5047 case Match_InvalidImm0_127:
5048 case Match_InvalidImm0_255:
5049 case Match_InvalidImm0_65535:
5050 case Match_InvalidImm1_8:
5051 case Match_InvalidImm1_16:
5052 case Match_InvalidImm1_32:
5053 case Match_InvalidImm1_64:
5054 case Match_InvalidSVEAddSubImm8:
5055 case Match_InvalidSVEAddSubImm16:
5056 case Match_InvalidSVEAddSubImm32:
5057 case Match_InvalidSVEAddSubImm64:
5058 case Match_InvalidSVECpyImm8:
5059 case Match_InvalidSVECpyImm16:
5060 case Match_InvalidSVECpyImm32:
5061 case Match_InvalidSVECpyImm64:
5062 case Match_InvalidIndexRange1_1:
5063 case Match_InvalidIndexRange0_15:
5064 case Match_InvalidIndexRange0_7:
5065 case Match_InvalidIndexRange0_3:
5066 case Match_InvalidIndexRange0_1:
5067 case Match_InvalidSVEIndexRange0_63:
5068 case Match_InvalidSVEIndexRange0_31:
5069 case Match_InvalidSVEIndexRange0_15:
5070 case Match_InvalidSVEIndexRange0_7:
5071 case Match_InvalidSVEIndexRange0_3:
5072 case Match_InvalidLabel:
5073 case Match_InvalidComplexRotationEven:
5074 case Match_InvalidComplexRotationOdd:
5075 case Match_InvalidGPR64shifted8:
5076 case Match_InvalidGPR64shifted16:
5077 case Match_InvalidGPR64shifted32:
5078 case Match_InvalidGPR64shifted64:
5079 case Match_InvalidGPR64NoXZRshifted8:
5080 case Match_InvalidGPR64NoXZRshifted16:
5081 case Match_InvalidGPR64NoXZRshifted32:
5082 case Match_InvalidGPR64NoXZRshifted64:
5083 case Match_InvalidZPR32UXTW8:
5084 case Match_InvalidZPR32UXTW16:
5085 case Match_InvalidZPR32UXTW32:
5086 case Match_InvalidZPR32UXTW64:
5087 case Match_InvalidZPR32SXTW8:
5088 case Match_InvalidZPR32SXTW16:
5089 case Match_InvalidZPR32SXTW32:
5090 case Match_InvalidZPR32SXTW64:
5091 case Match_InvalidZPR64UXTW8:
5092 case Match_InvalidZPR64SXTW8:
5093 case Match_InvalidZPR64UXTW16:
5094 case Match_InvalidZPR64SXTW16:
5095 case Match_InvalidZPR64UXTW32:
5096 case Match_InvalidZPR64SXTW32:
5097 case Match_InvalidZPR64UXTW64:
5098 case Match_InvalidZPR64SXTW64:
5099 case Match_InvalidZPR32LSL8:
5100 case Match_InvalidZPR32LSL16:
5101 case Match_InvalidZPR32LSL32:
5102 case Match_InvalidZPR32LSL64:
5103 case Match_InvalidZPR64LSL8:
5104 case Match_InvalidZPR64LSL16:
5105 case Match_InvalidZPR64LSL32:
5106 case Match_InvalidZPR64LSL64:
5107 case Match_InvalidZPR0:
5108 case Match_InvalidZPR8:
5109 case Match_InvalidZPR16:
5110 case Match_InvalidZPR32:
5111 case Match_InvalidZPR64:
5112 case Match_InvalidZPR128:
5113 case Match_InvalidZPR_3b8:
5114 case Match_InvalidZPR_3b16:
5115 case Match_InvalidZPR_3b32:
5116 case Match_InvalidZPR_4b16:
5117 case Match_InvalidZPR_4b32:
5118 case Match_InvalidZPR_4b64:
5119 case Match_InvalidSVEPredicateAnyReg:
5120 case Match_InvalidSVEPattern:
5121 case Match_InvalidSVEPredicateBReg:
5122 case Match_InvalidSVEPredicateHReg:
5123 case Match_InvalidSVEPredicateSReg:
5124 case Match_InvalidSVEPredicateDReg:
5125 case Match_InvalidSVEPredicate3bAnyReg:
5126 case Match_InvalidSVEPredicate3bBReg:
5127 case Match_InvalidSVEPredicate3bHReg:
5128 case Match_InvalidSVEPredicate3bSReg:
5129 case Match_InvalidSVEPredicate3bDReg:
5130 case Match_InvalidSVEExactFPImmOperandHalfOne:
5131 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5132 case Match_InvalidSVEExactFPImmOperandZeroOne:
5133 case Match_MSR:
5134 case Match_MRS: {
5135 if (ErrorInfo >= Operands.size())
5136 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5137 // Any time we get here, there's nothing fancy to do. Just get the
5138 // operand SMLoc and display the diagnostic.
5139 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5140 if (ErrorLoc == SMLoc())
5141 ErrorLoc = IDLoc;
5142 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5143 }
5144 }
5145
5146 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5146)
;
5147}
5148
5149/// ParseDirective parses the arm specific directives
5150bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5151 const MCObjectFileInfo::Environment Format =
5152 getContext().getObjectFileInfo()->getObjectFileType();
5153 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
1
Assuming 'Format' is not equal to IsMachO
5154 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
2
Assuming 'Format' is equal to IsCOFF
5155
5156 auto IDVal = DirectiveID.getIdentifier().lower();
5157 SMLoc Loc = DirectiveID.getLoc();
5158 if (IDVal == ".arch")
3
Taking false branch
5159 parseDirectiveArch(Loc);
5160 else if (IDVal == ".cpu")
4
Taking false branch
5161 parseDirectiveCPU(Loc);
5162 else if (IDVal == ".tlsdesccall")
5
Taking false branch
5163 parseDirectiveTLSDescCall(Loc);
5164 else if (IDVal == ".ltorg" || IDVal == ".pool")
6
Taking false branch
5165 parseDirectiveLtorg(Loc);
5166 else if (IDVal == ".unreq")
7
Taking false branch
5167 parseDirectiveUnreq(Loc);
5168 else if (IDVal == ".inst")
8
Taking false branch
5169 parseDirectiveInst(Loc);
5170 else if (IDVal == ".cfi_negate_ra_state")
9
Taking false branch
5171 parseDirectiveCFINegateRAState();
5172 else if (IDVal == ".cfi_b_key_frame")
10
Taking false branch
5173 parseDirectiveCFIBKeyFrame();
5174 else if (IDVal == ".arch_extension")
11
Taking false branch
5175 parseDirectiveArchExtension(Loc);
5176 else if (IDVal == ".variant_pcs")
12
Taking false branch
5177 parseDirectiveVariantPCS(Loc);
5178 else if (IsMachO
12.1
'IsMachO' is false
) {
13
Taking false branch
5179 if (IDVal == MCLOHDirectiveName())
5180 parseDirectiveLOH(IDVal, Loc);
5181 else
5182 return true;
5183 } else if (IsCOFF
13.1
'IsCOFF' is true
) {
14
Taking true branch
5184 if (IDVal == ".seh_stackalloc")
15
Taking true branch
5185 parseDirectiveSEHAllocStack(Loc);
16
Calling 'AArch64AsmParser::parseDirectiveSEHAllocStack'
5186 else if (IDVal == ".seh_endprologue")
5187 parseDirectiveSEHPrologEnd(Loc);
5188 else if (IDVal == ".seh_save_r19r20_x")
5189 parseDirectiveSEHSaveR19R20X(Loc);
5190 else if (IDVal == ".seh_save_fplr")
5191 parseDirectiveSEHSaveFPLR(Loc);
5192 else if (IDVal == ".seh_save_fplr_x")
5193 parseDirectiveSEHSaveFPLRX(Loc);
5194 else if (IDVal == ".seh_save_reg")
5195 parseDirectiveSEHSaveReg(Loc);
5196 else if (IDVal == ".seh_save_reg_x")
5197 parseDirectiveSEHSaveRegX(Loc);
5198 else if (IDVal == ".seh_save_regp")
5199 parseDirectiveSEHSaveRegP(Loc);
5200 else if (IDVal == ".seh_save_regp_x")
5201 parseDirectiveSEHSaveRegPX(Loc);
5202 else if (IDVal == ".seh_save_lrpair")
5203 parseDirectiveSEHSaveLRPair(Loc);
5204 else if (IDVal == ".seh_save_freg")
5205 parseDirectiveSEHSaveFReg(Loc);
5206 else if (IDVal == ".seh_save_freg_x")
5207 parseDirectiveSEHSaveFRegX(Loc);
5208 else if (IDVal == ".seh_save_fregp")
5209 parseDirectiveSEHSaveFRegP(Loc);
5210 else if (IDVal == ".seh_save_fregp_x")
5211 parseDirectiveSEHSaveFRegPX(Loc);
5212 else if (IDVal == ".seh_set_fp")
5213 parseDirectiveSEHSetFP(Loc);
5214 else if (IDVal == ".seh_add_fp")
5215 parseDirectiveSEHAddFP(Loc);
5216 else if (IDVal == ".seh_nop")
5217 parseDirectiveSEHNop(Loc);
5218 else if (IDVal == ".seh_save_next")
5219 parseDirectiveSEHSaveNext(Loc);
5220 else if (IDVal == ".seh_startepilogue")
5221 parseDirectiveSEHEpilogStart(Loc);
5222 else if (IDVal == ".seh_endepilogue")
5223 parseDirectiveSEHEpilogEnd(Loc);
5224 else if (IDVal == ".seh_trap_frame")
5225 parseDirectiveSEHTrapFrame(Loc);
5226 else if (IDVal == ".seh_pushframe")
5227 parseDirectiveSEHMachineFrame(Loc);
5228 else if (IDVal == ".seh_context")
5229 parseDirectiveSEHContext(Loc);
5230 else if (IDVal == ".seh_clear_unwound_to_call")
5231 parseDirectiveSEHClearUnwoundToCall(Loc);
5232 else
5233 return true;
5234 } else
5235 return true;
5236 return false;
5237}
5238
/// Expand the pseudo-extension "crypto" (or "nocrypto") in
/// \p RequestedExtensions into the concrete crypto sub-features it implies
/// for \p ArchKind, appending the expansions to the list. If both "crypto"
/// and "nocrypto" are requested, "nocrypto" wins (the first branch is
/// skipped and the second taken).
static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
                            SmallVector<StringRef, 4> &RequestedExtensions) {
  const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
  const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");

  if (!NoCrypto && Crypto) {
    switch (ArchKind) {
    default:
      // Map 'generic' (and others) to sha2 and aes, because
      // that was the traditional meaning of crypto.
    case AArch64::ArchKind::ARMV8_1A:
    case AArch64::ArchKind::ARMV8_2A:
    case AArch64::ArchKind::ARMV8_3A:
      RequestedExtensions.push_back("sha2");
      RequestedExtensions.push_back("aes");
      break;
    // v8.4 and later widen 'crypto' to also include sm4/sha3.
    case AArch64::ArchKind::ARMV8_4A:
    case AArch64::ArchKind::ARMV8_5A:
    case AArch64::ArchKind::ARMV8_6A:
    case AArch64::ArchKind::ARMV8R:
      RequestedExtensions.push_back("sm4");
      RequestedExtensions.push_back("sha3");
      RequestedExtensions.push_back("sha2");
      RequestedExtensions.push_back("aes");
      break;
    }
  } else if (NoCrypto) {
    switch (ArchKind) {
    default:
      // Map 'generic' (and others) to nosha2 and noaes, mirroring the
      // positive 'crypto' expansion above.
    case AArch64::ArchKind::ARMV8_1A:
    case AArch64::ArchKind::ARMV8_2A:
    case AArch64::ArchKind::ARMV8_3A:
      RequestedExtensions.push_back("nosha2");
      RequestedExtensions.push_back("noaes");
      break;
    // NOTE(review): ARMV8R is listed in the 'crypto' expansion above but not
    // here, so 'nocrypto' on v8-R falls into the default (nosha2/noaes only)
    // case — confirm whether nosm4/nosha3 were intended as well.
    case AArch64::ArchKind::ARMV8_4A:
    case AArch64::ArchKind::ARMV8_5A:
    case AArch64::ArchKind::ARMV8_6A:
      RequestedExtensions.push_back("nosm4");
      RequestedExtensions.push_back("nosha3");
      RequestedExtensions.push_back("nosha2");
      RequestedExtensions.push_back("noaes");
      break;
    }
  }
}
5287
5288/// parseDirectiveArch
5289/// ::= .arch token
5290bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5291 SMLoc ArchLoc = getLoc();
5292
5293 StringRef Arch, ExtensionString;
5294 std::tie(Arch, ExtensionString) =
5295 getParser().parseStringToEndOfStatement().trim().split('+');
5296
5297 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5298 if (ID == AArch64::ArchKind::INVALID)
5299 return Error(ArchLoc, "unknown arch name");
5300
5301 if (parseToken(AsmToken::EndOfStatement))
5302 return true;
5303
5304 // Get the architecture and extension features.
5305 std::vector<StringRef> AArch64Features;
5306 AArch64::getArchFeatures(ID, AArch64Features);
5307 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5308 AArch64Features);
5309
5310 MCSubtargetInfo &STI = copySTI();
5311 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5312 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
5313 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5314
5315 SmallVector<StringRef, 4> RequestedExtensions;
5316 if (!ExtensionString.empty())
5317 ExtensionString.split(RequestedExtensions, '+');
5318
5319 ExpandCryptoAEK(ID, RequestedExtensions);
5320
5321 FeatureBitset Features = STI.getFeatureBits();
5322 for (auto Name : RequestedExtensions) {
5323 bool EnableFeature = true;
5324
5325 if (Name.startswith_lower("no")) {
5326 EnableFeature = false;
5327 Name = Name.substr(2);
5328 }
5329
5330 for (const auto &Extension : ExtensionMap) {
5331 if (Extension.Name != Name)
5332 continue;
5333
5334 if (Extension.Features.none())
5335 report_fatal_error("unsupported architectural extension: " + Name);
5336
5337 FeatureBitset ToggleFeatures = EnableFeature
5338 ? (~Features & Extension.Features)
5339 : ( Features & Extension.Features);
5340 FeatureBitset Features =
5341 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5342 setAvailableFeatures(Features);
5343 break;
5344 }
5345 }
5346 return false;
5347}
5348
5349/// parseDirectiveArchExtension
5350/// ::= .arch_extension [no]feature
5351bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5352 SMLoc ExtLoc = getLoc();
5353
5354 StringRef Name = getParser().parseStringToEndOfStatement().trim();
5355
5356 if (parseToken(AsmToken::EndOfStatement,
5357 "unexpected token in '.arch_extension' directive"))
5358 return true;
5359
5360 bool EnableFeature = true;
5361 if (Name.startswith_lower("no")) {
5362 EnableFeature = false;
5363 Name = Name.substr(2);
5364 }
5365
5366 MCSubtargetInfo &STI = copySTI();
5367 FeatureBitset Features = STI.getFeatureBits();
5368 for (const auto &Extension : ExtensionMap) {
5369 if (Extension.Name != Name)
5370 continue;
5371
5372 if (Extension.Features.none())
5373 return Error(ExtLoc, "unsupported architectural extension: " + Name);
5374
5375 FeatureBitset ToggleFeatures = EnableFeature
5376 ? (~Features & Extension.Features)
5377 : (Features & Extension.Features);
5378 FeatureBitset Features =
5379 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5380 setAvailableFeatures(Features);
5381 return false;
5382 }
5383
5384 return Error(ExtLoc, "unknown architectural extension: " + Name);
5385}
5386
/// Return a source location \p Offset bytes past \p L.
static SMLoc incrementLoc(SMLoc L, int Offset) {
  return SMLoc::getFromPointer(L.getPointer() + Offset);
}
5390
5391/// parseDirectiveCPU
5392/// ::= .cpu id
5393bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5394 SMLoc CurLoc = getLoc();
5395
5396 StringRef CPU, ExtensionString;
5397 std::tie(CPU, ExtensionString) =
5398 getParser().parseStringToEndOfStatement().trim().split('+');
5399
5400 if (parseToken(AsmToken::EndOfStatement))
5401 return true;
5402
5403 SmallVector<StringRef, 4> RequestedExtensions;
5404 if (!ExtensionString.empty())
5405 ExtensionString.split(RequestedExtensions, '+');
5406
5407 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5408 // once that is tablegen'ed
5409 if (!getSTI().isCPUStringValid(CPU)) {
5410 Error(CurLoc, "unknown CPU name");
5411 return false;
5412 }
5413
5414 MCSubtargetInfo &STI = copySTI();
5415 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
5416 CurLoc = incrementLoc(CurLoc, CPU.size());
5417
5418 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5419
5420 FeatureBitset Features = STI.getFeatureBits();
5421 for (auto Name : RequestedExtensions) {
5422 // Advance source location past '+'.
5423 CurLoc = incrementLoc(CurLoc, 1);
5424
5425 bool EnableFeature = true;
5426
5427 if (Name.startswith_lower("no")) {
5428 EnableFeature = false;
5429 Name = Name.substr(2);
5430 }
5431
5432 bool FoundExtension = false;
5433 for (const auto &Extension : ExtensionMap) {
5434 if (Extension.Name != Name)
5435 continue;
5436
5437 if (Extension.Features.none())
5438 report_fatal_error("unsupported architectural extension: " + Name);
5439
5440 FeatureBitset ToggleFeatures = EnableFeature
5441 ? (~Features & Extension.Features)
5442 : ( Features & Extension.Features);
5443 FeatureBitset Features =
5444 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5445 setAvailableFeatures(Features);
5446 FoundExtension = true;
5447
5448 break;
5449 }
5450
5451 if (!FoundExtension)
5452 Error(CurLoc, "unsupported architectural extension");
5453
5454 CurLoc = incrementLoc(CurLoc, Name.size());
5455 }
5456 return false;
5457}
5458
5459/// parseDirectiveInst
5460/// ::= .inst opcode [, ...]
5461bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5462 if (getLexer().is(AsmToken::EndOfStatement))
5463 return Error(Loc, "expected expression following '.inst' directive");
5464
5465 auto parseOp = [&]() -> bool {
5466 SMLoc L = getLoc();
5467 const MCExpr *Expr = nullptr;
5468 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5469 return true;
5470 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5471 if (check(!Value, L, "expected constant expression"))
5472 return true;
5473 getTargetStreamer().emitInst(Value->getValue());
5474 return false;
5475 };
5476
5477 if (parseMany(parseOp))
5478 return addErrorSuffix(" in '.inst' directive");
5479 return false;
5480}
5481
5482// parseDirectiveTLSDescCall:
5483// ::= .tlsdesccall symbol
5484bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5485 StringRef Name;
5486 if (check(getParser().parseIdentifier(Name), L,
5487 "expected symbol after directive") ||
5488 parseToken(AsmToken::EndOfStatement))
5489 return true;
5490
5491 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5492 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5493 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5494
5495 MCInst Inst;
5496 Inst.setOpcode(AArch64::TLSDESCCALL);
5497 Inst.addOperand(MCOperand::createExpr(Expr));
5498
5499 getParser().getStreamer().emitInstruction(Inst, getSTI());
5500 return false;
5501}
5502
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  MCLOHType Kind;
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    // NOTE(review): 'Id <= -1U' compares the signed 64-bit Id against the
    // unsigned 32-bit constant 0xFFFFFFFF (value-promoted to int64_t), so
    // the validity check is skipped for Id > UINT32_MAX — confirm whether
    // that is intentional or should simply be '!isValidMCLOHType(Id)'.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Parse exactly NbArgs comma-separated label arguments.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));

    if (Idx + 1 == NbArgs)
      break;
    if (parseToken(AsmToken::Comma,
                   "unexpected token in '" + Twine(IDVal) + "' directive"))
      return true;
  }
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '" + Twine(IDVal) + "' directive"))
    return true;

  getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
  return false;
}
5553
5554/// parseDirectiveLtorg
5555/// ::= .ltorg | .pool
5556bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5557 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5558 return true;
5559 getTargetStreamer().emitCurrentConstantPool();
5560 return false;
5561}
5562
5563/// parseDirectiveReq
5564/// ::= name .req registername
5565bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5566 MCAsmParser &Parser = getParser();
5567 Parser.Lex(); // Eat the '.req' token.
5568 SMLoc SRegLoc = getLoc();
5569 RegKind RegisterKind = RegKind::Scalar;
5570 unsigned RegNum;
5571 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5572
5573 if (ParseRes != MatchOperand_Success) {
5574 StringRef Kind;
5575 RegisterKind = RegKind::NeonVector;
5576 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5577
5578 if (ParseRes == MatchOperand_ParseFail)
5579 return true;
5580
5581 if (ParseRes == MatchOperand_Success && !Kind.empty())
5582 return Error(SRegLoc, "vector register without type specifier expected");
5583 }
5584
5585 if (ParseRes != MatchOperand_Success) {
5586 StringRef Kind;
5587 RegisterKind = RegKind::SVEDataVector;
5588 ParseRes =
5589 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5590
5591 if (ParseRes == MatchOperand_ParseFail)
5592 return true;
5593
5594 if (ParseRes == MatchOperand_Success && !Kind.empty())
5595 return Error(SRegLoc,
5596 "sve vector register without type specifier expected");
5597 }
5598
5599 if (ParseRes != MatchOperand_Success) {
5600 StringRef Kind;
5601 RegisterKind = RegKind::SVEPredicateVector;
5602 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5603
5604 if (ParseRes == MatchOperand_ParseFail)
5605 return true;
5606
5607 if (ParseRes == MatchOperand_Success && !Kind.empty())
5608 return Error(SRegLoc,
5609 "sve predicate register without type specifier expected");
5610 }
5611
5612 if (ParseRes != MatchOperand_Success)
5613 return Error(SRegLoc, "register name or alias expected");
5614
5615 // Shouldn't be anything else.
5616 if (parseToken(AsmToken::EndOfStatement,
5617 "unexpected input in .req directive"))
5618 return true;
5619
5620 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5621 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5622 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5623
5624 return false;
5625}
5626
5627/// parseDirectiveUneq
5628/// ::= .unreq registername
5629bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5630 MCAsmParser &Parser = getParser();
5631 if (getTok().isNot(AsmToken::Identifier))
5632 return TokError("unexpected input in .unreq directive.");
5633 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5634 Parser.Lex(); // Eat the identifier.
5635 if (parseToken(AsmToken::EndOfStatement))
5636 return addErrorSuffix("in '.unreq' directive");
5637 return false;
5638}
5639
5640bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5641 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5642 return true;
5643 getStreamer().emitCFINegateRAState();
5644 return false;
5645}
5646
5647/// parseDirectiveCFIBKeyFrame
5648/// ::= .cfi_b_key
5649bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5650 if (parseToken(AsmToken::EndOfStatement,
5651 "unexpected token in '.cfi_b_key_frame'"))
5652 return true;
5653 getStreamer().emitCFIBKeyFrame();
5654 return false;
5655}
5656
5657/// parseDirectiveVariantPCS
5658/// ::= .variant_pcs symbolname
5659bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
5660 MCAsmParser &Parser = getParser();
5661
5662 const AsmToken &Tok = Parser.getTok();
5663 if (Tok.isNot(AsmToken::Identifier))
5664 return TokError("expected symbol name");
5665
5666 StringRef SymbolName = Tok.getIdentifier();
5667
5668 MCSymbol *Sym = getContext().lookupSymbol(SymbolName);
5669 if (!Sym)
5670 return TokError("unknown symbol in '.variant_pcs' directive");
5671
5672 Parser.Lex(); // Eat the symbol
5673
5674 // Shouldn't be any more tokens
5675 if (parseToken(AsmToken::EndOfStatement))
5676 return addErrorSuffix(" in '.variant_pcs' directive");
5677
5678 getTargetStreamer().emitDirectiveVariantPCS(Sym);
5679
5680 return false;
5681}
5682
5683/// parseDirectiveSEHAllocStack
5684/// ::= .seh_stackalloc
5685bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
5686 int64_t Size;
5687 if (parseImmExpr(Size))
17
Calling 'AArch64AsmParser::parseImmExpr'
5688 return true;
5689 getTargetStreamer().EmitARM64WinCFIAllocStack(Size);
5690 return false;
5691}
5692
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
///
/// Takes no operands; marks the end of the SEH prologue and always succeeds.
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFIPrologEnd();
  return false;
}
5699
5700/// parseDirectiveSEHSaveR19R20X
5701/// ::= .seh_save_r19r20_x
5702bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
5703 int64_t Offset;
5704 if (parseImmExpr(Offset))
5705 return true;
5706 getTargetStreamer().EmitARM64WinCFISaveR19R20X(Offset);
5707 return false;
5708}
5709
5710/// parseDirectiveSEHSaveFPLR
5711/// ::= .seh_save_fplr
5712bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
5713 int64_t Offset;
5714 if (parseImmExpr(Offset))
5715 return true;
5716 getTargetStreamer().EmitARM64WinCFISaveFPLR(Offset);
5717 return false;
5718}
5719
5720/// parseDirectiveSEHSaveFPLRX
5721/// ::= .seh_save_fplr_x
5722bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
5723 int64_t Offset;
5724 if (parseImmExpr(Offset))
5725 return true;
5726 getTargetStreamer().EmitARM64WinCFISaveFPLRX(Offset);
5727 return false;
5728}
5729
5730/// parseDirectiveSEHSaveReg
5731/// ::= .seh_save_reg
5732bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
5733 unsigned Reg;
5734 int64_t Offset;
5735 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5736 parseComma() || parseImmExpr(Offset))
5737 return true;
5738 getTargetStreamer().EmitARM64WinCFISaveReg(Reg, Offset);
5739 return false;
5740}
5741
5742/// parseDirectiveSEHSaveRegX
5743/// ::= .seh_save_reg_x
5744bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
5745 unsigned Reg;
5746 int64_t Offset;
5747 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5748 parseComma() || parseImmExpr(Offset))
5749 return true;
5750 getTargetStreamer().EmitARM64WinCFISaveRegX(Reg, Offset);
5751 return false;
5752}
5753
5754/// parseDirectiveSEHSaveRegP
5755/// ::= .seh_save_regp
5756bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
5757 unsigned Reg;
5758 int64_t Offset;
5759 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
5760 parseComma() || parseImmExpr(Offset))
5761 return true;
5762 getTargetStreamer().EmitARM64WinCFISaveRegP(Reg, Offset);
5763 return false;
5764}
5765
5766/// parseDirectiveSEHSaveRegPX
5767/// ::= .seh_save_regp_x
5768bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
5769 unsigned Reg;
5770 int64_t Offset;
5771 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
5772 parseComma() || parseImmExpr(Offset))
5773 return true;
5774 getTargetStreamer().EmitARM64WinCFISaveRegPX(Reg, Offset);
5775 return false;
5776}
5777
5778/// parseDirectiveSEHSaveLRPair
5779/// ::= .seh_save_lrpair
5780bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
5781 unsigned Reg;
5782 int64_t Offset;
5783 L = getLoc();
5784 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5785 parseComma() || parseImmExpr(Offset))
5786 return true;
5787 if (check(((Reg - 19) % 2 != 0), L,
5788 "expected register with even offset from x19"))
5789 return true;
5790 getTargetStreamer().EmitARM64WinCFISaveLRPair(Reg, Offset);
5791 return false;
5792}
5793
5794/// parseDirectiveSEHSaveFReg
5795/// ::= .seh_save_freg
5796bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
5797 unsigned Reg;
5798 int64_t Offset;
5799 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
5800 parseComma() || parseImmExpr(Offset))
5801 return true;
5802 getTargetStreamer().EmitARM64WinCFISaveFReg(Reg, Offset);
5803 return false;
5804}
5805
5806/// parseDirectiveSEHSaveFRegX
5807/// ::= .seh_save_freg_x
5808bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
5809 unsigned Reg;
5810 int64_t Offset;
5811 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
5812 parseComma() || parseImmExpr(Offset))
5813 return true;
5814 getTargetStreamer().EmitARM64WinCFISaveFRegX(Reg, Offset);
5815 return false;
5816}
5817
5818/// parseDirectiveSEHSaveFRegP
5819/// ::= .seh_save_fregp
5820bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
5821 uns