Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 3808, column 15
The left operand of '==' is a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/.. 
-U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-17-195756-12974-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64InstPrinter.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "TargetInfo/AArch64TargetInfo.h"
15#include "AArch64InstrInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringExtras.h"
23#include "llvm/ADT/StringMap.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSwitch.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/MC/MCContext.h"
28#include "llvm/MC/MCExpr.h"
29#include "llvm/MC/MCInst.h"
30#include "llvm/MC/MCLinkerOptimizationHint.h"
31#include "llvm/MC/MCObjectFileInfo.h"
32#include "llvm/MC/MCParser/MCAsmLexer.h"
33#include "llvm/MC/MCParser/MCAsmParser.h"
34#include "llvm/MC/MCParser/MCAsmParserExtension.h"
35#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
36#include "llvm/MC/MCParser/MCTargetAsmParser.h"
37#include "llvm/MC/MCRegisterInfo.h"
38#include "llvm/MC/MCStreamer.h"
39#include "llvm/MC/MCSubtargetInfo.h"
40#include "llvm/MC/MCSymbol.h"
41#include "llvm/MC/MCTargetOptions.h"
42#include "llvm/MC/SubtargetFeature.h"
43#include "llvm/MC/MCValue.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/Compiler.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/MathExtras.h"
48#include "llvm/Support/SMLoc.h"
49#include "llvm/Support/TargetParser.h"
50#include "llvm/Support/TargetRegistry.h"
51#include "llvm/Support/raw_ostream.h"
52#include <cassert>
53#include <cctype>
54#include <cstdint>
55#include <cstdio>
56#include <string>
57#include <tuple>
58#include <utility>
59#include <vector>
60
61using namespace llvm;
62
63namespace {
64
// Categories of registers the AArch64 asm parser tells apart while matching
// operands.
enum class RegKind {
  Scalar,             // scalar (non-vector) register
  NeonVector,         // NEON vector register
  SVEDataVector,      // SVE data vector register
  SVEPredicateVector  // SVE predicate register
};
71
// How a parsed register is allowed to relate to the register class the
// matcher expects: an exact match, or a match through its super-/sub-register
// (e.g. for GPR64as32 / GPR32as64 operands).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
77
78class AArch64AsmParser : public MCTargetAsmParser {
79private:
80 StringRef Mnemonic; ///< Instruction mnemonic.
81
82 // Map of register aliases registers via the .req directive.
83 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
84
85 class PrefixInfo {
86 public:
87 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
88 PrefixInfo Prefix;
89 switch (Inst.getOpcode()) {
90 case AArch64::MOVPRFX_ZZ:
91 Prefix.Active = true;
92 Prefix.Dst = Inst.getOperand(0).getReg();
93 break;
94 case AArch64::MOVPRFX_ZPmZ_B:
95 case AArch64::MOVPRFX_ZPmZ_H:
96 case AArch64::MOVPRFX_ZPmZ_S:
97 case AArch64::MOVPRFX_ZPmZ_D:
98 Prefix.Active = true;
99 Prefix.Predicated = true;
100 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
101 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 102, __PRETTY_FUNCTION__))
102 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 102, __PRETTY_FUNCTION__))
;
103 Prefix.Dst = Inst.getOperand(0).getReg();
104 Prefix.Pg = Inst.getOperand(2).getReg();
105 break;
106 case AArch64::MOVPRFX_ZPzZ_B:
107 case AArch64::MOVPRFX_ZPzZ_H:
108 case AArch64::MOVPRFX_ZPzZ_S:
109 case AArch64::MOVPRFX_ZPzZ_D:
110 Prefix.Active = true;
111 Prefix.Predicated = true;
112 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
113 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 114, __PRETTY_FUNCTION__))
114 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 114, __PRETTY_FUNCTION__))
;
115 Prefix.Dst = Inst.getOperand(0).getReg();
116 Prefix.Pg = Inst.getOperand(1).getReg();
117 break;
118 default:
119 break;
120 }
121
122 return Prefix;
123 }
124
125 PrefixInfo() : Active(false), Predicated(false) {}
126 bool isActive() const { return Active; }
127 bool isPredicated() const { return Predicated; }
128 unsigned getElementSize() const {
129 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 129, __PRETTY_FUNCTION__))
;
130 return ElementSize;
131 }
132 unsigned getDstReg() const { return Dst; }
133 unsigned getPgReg() const {
134 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 134, __PRETTY_FUNCTION__))
;
135 return Pg;
136 }
137
138 private:
139 bool Active;
140 bool Predicated;
141 unsigned ElementSize;
142 unsigned Dst;
143 unsigned Pg;
144 } NextPrefix;
145
146 AArch64TargetStreamer &getTargetStreamer() {
147 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
148 return static_cast<AArch64TargetStreamer &>(TS);
149 }
150
151 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
152
153 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
154 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
155 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
156 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
157 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
158 bool parseRegister(OperandVector &Operands);
159 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
160 bool parseNeonVectorList(OperandVector &Operands);
161 bool parseOptionalMulOperand(OperandVector &Operands);
162 bool parseOperand(OperandVector &Operands, bool isCondCode,
163 bool invertCondCode);
164 bool parseImmExpr(int64_t &Out);
165 bool parseComma();
166 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
167 unsigned Last);
168
169 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
170 OperandVector &Operands);
171
172 bool parseDirectiveArch(SMLoc L);
173 bool parseDirectiveArchExtension(SMLoc L);
174 bool parseDirectiveCPU(SMLoc L);
175 bool parseDirectiveInst(SMLoc L);
176
177 bool parseDirectiveTLSDescCall(SMLoc L);
178
179 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
180 bool parseDirectiveLtorg(SMLoc L);
181
182 bool parseDirectiveReq(StringRef Name, SMLoc L);
183 bool parseDirectiveUnreq(SMLoc L);
184 bool parseDirectiveCFINegateRAState();
185 bool parseDirectiveCFIBKeyFrame();
186
187 bool parseDirectiveSEHAllocStack(SMLoc L);
188 bool parseDirectiveSEHPrologEnd(SMLoc L);
189 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
190 bool parseDirectiveSEHSaveFPLR(SMLoc L);
191 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
192 bool parseDirectiveSEHSaveReg(SMLoc L);
193 bool parseDirectiveSEHSaveRegX(SMLoc L);
194 bool parseDirectiveSEHSaveRegP(SMLoc L);
195 bool parseDirectiveSEHSaveRegPX(SMLoc L);
196 bool parseDirectiveSEHSaveLRPair(SMLoc L);
197 bool parseDirectiveSEHSaveFReg(SMLoc L);
198 bool parseDirectiveSEHSaveFRegX(SMLoc L);
199 bool parseDirectiveSEHSaveFRegP(SMLoc L);
200 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
201 bool parseDirectiveSEHSetFP(SMLoc L);
202 bool parseDirectiveSEHAddFP(SMLoc L);
203 bool parseDirectiveSEHNop(SMLoc L);
204 bool parseDirectiveSEHSaveNext(SMLoc L);
205 bool parseDirectiveSEHEpilogStart(SMLoc L);
206 bool parseDirectiveSEHEpilogEnd(SMLoc L);
207 bool parseDirectiveSEHTrapFrame(SMLoc L);
208 bool parseDirectiveSEHMachineFrame(SMLoc L);
209 bool parseDirectiveSEHContext(SMLoc L);
210 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
211
212 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
213 SmallVectorImpl<SMLoc> &Loc);
214 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
215 OperandVector &Operands, MCStreamer &Out,
216 uint64_t &ErrorInfo,
217 bool MatchingInlineAsm) override;
218/// @name Auto-generated Match Functions
219/// {
220
221#define GET_ASSEMBLER_HEADER
222#include "AArch64GenAsmMatcher.inc"
223
224 /// }
225
226 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
227 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
228 RegKind MatchKind);
229 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
230 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
231 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
232 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
233 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
234 template <bool IsSVEPrefetch = false>
235 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
236 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
237 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
238 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
239 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
240 template<bool AddFPZeroAsLiteral>
241 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
242 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
243 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
244 bool tryParseNeonVectorRegister(OperandVector &Operands);
245 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
246 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
247 template <bool ParseShiftExtend,
248 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
249 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
250 template <bool ParseShiftExtend, bool ParseSuffix>
251 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
252 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
253 template <RegKind VectorKind>
254 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
255 bool ExpectMatch = false);
256 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
257
258public:
259 enum AArch64MatchResultTy {
260 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
261#define GET_OPERAND_DIAGNOSTIC_TYPES
262#include "AArch64GenAsmMatcher.inc"
263 };
264 bool IsILP32;
265
266 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
267 const MCInstrInfo &MII, const MCTargetOptions &Options)
268 : MCTargetAsmParser(Options, STI, MII) {
269 IsILP32 = Options.getABIName() == "ilp32";
270 MCAsmParserExtension::Initialize(Parser);
271 MCStreamer &S = getParser().getStreamer();
272 if (S.getTargetStreamer() == nullptr)
273 new AArch64TargetStreamer(S);
274
275 // Alias .hword/.word/.[dx]word to the target-independent
276 // .2byte/.4byte/.8byte directives as they have the same form and
277 // semantics:
278 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
279 Parser.addAliasForDirective(".hword", ".2byte");
280 Parser.addAliasForDirective(".word", ".4byte");
281 Parser.addAliasForDirective(".dword", ".8byte");
282 Parser.addAliasForDirective(".xword", ".8byte");
283
284 // Initialize the set of available features.
285 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
286 }
287
288 bool regsEqual(const MCParsedAsmOperand &Op1,
289 const MCParsedAsmOperand &Op2) const override;
290 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
291 SMLoc NameLoc, OperandVector &Operands) override;
292 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
293 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
294 SMLoc &EndLoc) override;
295 bool ParseDirective(AsmToken DirectiveID) override;
296 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
297 unsigned Kind) override;
298
299 static bool classifySymbolRef(const MCExpr *Expr,
300 AArch64MCExpr::VariantKind &ELFRefKind,
301 MCSymbolRefExpr::VariantKind &DarwinRefKind,
302 int64_t &Addend);
303};
304
305/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
306/// instruction.
307class AArch64Operand : public MCParsedAsmOperand {
308private:
309 enum KindTy {
310 k_Immediate,
311 k_ShiftedImm,
312 k_CondCode,
313 k_Register,
314 k_VectorList,
315 k_VectorIndex,
316 k_Token,
317 k_SysReg,
318 k_SysCR,
319 k_Prefetch,
320 k_ShiftExtend,
321 k_FPImm,
322 k_Barrier,
323 k_PSBHint,
324 k_BTIHint,
325 } Kind;
326
327 SMLoc StartLoc, EndLoc;
328
329 struct TokOp {
330 const char *Data;
331 unsigned Length;
332 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
333 };
334
335 // Separate shift/extend operand.
336 struct ShiftExtendOp {
337 AArch64_AM::ShiftExtendType Type;
338 unsigned Amount;
339 bool HasExplicitAmount;
340 };
341
342 struct RegOp {
343 unsigned RegNum;
344 RegKind Kind;
345 int ElementWidth;
346
347 // The register may be allowed as a different register class,
348 // e.g. for GPR64as32 or GPR32as64.
349 RegConstraintEqualityTy EqualityTy;
350
351 // In some cases the shift/extend needs to be explicitly parsed together
352 // with the register, rather than as a separate operand. This is needed
353 // for addressing modes where the instruction as a whole dictates the
354 // scaling/extend, rather than specific bits in the instruction.
355 // By parsing them as a single operand, we avoid the need to pass an
356 // extra operand in all CodeGen patterns (because all operands need to
357 // have an associated value), and we avoid the need to update TableGen to
358 // accept operands that have no associated bits in the instruction.
359 //
360 // An added benefit of parsing them together is that the assembler
361 // can give a sensible diagnostic if the scaling is not correct.
362 //
363 // The default is 'lsl #0' (HasExplicitAmount = false) if no
364 // ShiftExtend is specified.
365 ShiftExtendOp ShiftExtend;
366 };
367
368 struct VectorListOp {
369 unsigned RegNum;
370 unsigned Count;
371 unsigned NumElements;
372 unsigned ElementWidth;
373 RegKind RegisterKind;
374 };
375
376 struct VectorIndexOp {
377 unsigned Val;
378 };
379
380 struct ImmOp {
381 const MCExpr *Val;
382 };
383
384 struct ShiftedImmOp {
385 const MCExpr *Val;
386 unsigned ShiftAmount;
387 };
388
389 struct CondCodeOp {
390 AArch64CC::CondCode Code;
391 };
392
393 struct FPImmOp {
394 uint64_t Val; // APFloat value bitcasted to uint64_t.
395 bool IsExact; // describes whether parsed value was exact.
396 };
397
398 struct BarrierOp {
399 const char *Data;
400 unsigned Length;
401 unsigned Val; // Not the enum since not all values have names.
402 };
403
404 struct SysRegOp {
405 const char *Data;
406 unsigned Length;
407 uint32_t MRSReg;
408 uint32_t MSRReg;
409 uint32_t PStateField;
410 };
411
412 struct SysCRImmOp {
413 unsigned Val;
414 };
415
416 struct PrefetchOp {
417 const char *Data;
418 unsigned Length;
419 unsigned Val;
420 };
421
422 struct PSBHintOp {
423 const char *Data;
424 unsigned Length;
425 unsigned Val;
426 };
427
428 struct BTIHintOp {
429 const char *Data;
430 unsigned Length;
431 unsigned Val;
432 };
433
434 struct ExtendOp {
435 unsigned Val;
436 };
437
438 union {
439 struct TokOp Tok;
440 struct RegOp Reg;
441 struct VectorListOp VectorList;
442 struct VectorIndexOp VectorIndex;
443 struct ImmOp Imm;
444 struct ShiftedImmOp ShiftedImm;
445 struct CondCodeOp CondCode;
446 struct FPImmOp FPImm;
447 struct BarrierOp Barrier;
448 struct SysRegOp SysReg;
449 struct SysCRImmOp SysCRImm;
450 struct PrefetchOp Prefetch;
451 struct PSBHintOp PSBHint;
452 struct BTIHintOp BTIHint;
453 struct ShiftExtendOp ShiftExtend;
454 };
455
456 // Keep the MCContext around as the MCExprs may need manipulated during
457 // the add<>Operands() calls.
458 MCContext &Ctx;
459
460public:
461 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
462
463 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
464 Kind = o.Kind;
465 StartLoc = o.StartLoc;
466 EndLoc = o.EndLoc;
467 switch (Kind) {
468 case k_Token:
469 Tok = o.Tok;
470 break;
471 case k_Immediate:
472 Imm = o.Imm;
473 break;
474 case k_ShiftedImm:
475 ShiftedImm = o.ShiftedImm;
476 break;
477 case k_CondCode:
478 CondCode = o.CondCode;
479 break;
480 case k_FPImm:
481 FPImm = o.FPImm;
482 break;
483 case k_Barrier:
484 Barrier = o.Barrier;
485 break;
486 case k_Register:
487 Reg = o.Reg;
488 break;
489 case k_VectorList:
490 VectorList = o.VectorList;
491 break;
492 case k_VectorIndex:
493 VectorIndex = o.VectorIndex;
494 break;
495 case k_SysReg:
496 SysReg = o.SysReg;
497 break;
498 case k_SysCR:
499 SysCRImm = o.SysCRImm;
500 break;
501 case k_Prefetch:
502 Prefetch = o.Prefetch;
503 break;
504 case k_PSBHint:
505 PSBHint = o.PSBHint;
506 break;
507 case k_BTIHint:
508 BTIHint = o.BTIHint;
509 break;
510 case k_ShiftExtend:
511 ShiftExtend = o.ShiftExtend;
512 break;
513 }
514 }
515
516 /// getStartLoc - Get the location of the first token of this operand.
517 SMLoc getStartLoc() const override { return StartLoc; }
518 /// getEndLoc - Get the location of the last token of this operand.
519 SMLoc getEndLoc() const override { return EndLoc; }
520
521 StringRef getToken() const {
522 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 522, __PRETTY_FUNCTION__))
;
523 return StringRef(Tok.Data, Tok.Length);
524 }
525
526 bool isTokenSuffix() const {
527 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 527, __PRETTY_FUNCTION__))
;
528 return Tok.IsSuffix;
529 }
530
531 const MCExpr *getImm() const {
532 assert(Kind == k_Immediate && "Invalid access!")((Kind == k_Immediate && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 532, __PRETTY_FUNCTION__))
;
533 return Imm.Val;
534 }
535
536 const MCExpr *getShiftedImmVal() const {
537 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 537, __PRETTY_FUNCTION__))
;
538 return ShiftedImm.Val;
539 }
540
541 unsigned getShiftedImmShift() const {
542 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 542, __PRETTY_FUNCTION__))
;
543 return ShiftedImm.ShiftAmount;
544 }
545
546 AArch64CC::CondCode getCondCode() const {
547 assert(Kind == k_CondCode && "Invalid access!")((Kind == k_CondCode && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 547, __PRETTY_FUNCTION__))
;
548 return CondCode.Code;
549 }
550
551 APFloat getFPImm() const {
552 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 552, __PRETTY_FUNCTION__))
;
553 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
554 }
555
556 bool getFPImmIsExact() const {
557 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 557, __PRETTY_FUNCTION__))
;
558 return FPImm.IsExact;
559 }
560
561 unsigned getBarrier() const {
562 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 562, __PRETTY_FUNCTION__))
;
563 return Barrier.Val;
564 }
565
566 StringRef getBarrierName() const {
567 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 567, __PRETTY_FUNCTION__))
;
568 return StringRef(Barrier.Data, Barrier.Length);
569 }
570
571 unsigned getReg() const override {
572 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 572, __PRETTY_FUNCTION__))
;
573 return Reg.RegNum;
574 }
575
576 RegConstraintEqualityTy getRegEqualityTy() const {
577 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 577, __PRETTY_FUNCTION__))
;
578 return Reg.EqualityTy;
579 }
580
581 unsigned getVectorListStart() const {
582 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 582, __PRETTY_FUNCTION__))
;
583 return VectorList.RegNum;
584 }
585
586 unsigned getVectorListCount() const {
587 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 587, __PRETTY_FUNCTION__))
;
588 return VectorList.Count;
589 }
590
591 unsigned getVectorIndex() const {
592 assert(Kind == k_VectorIndex && "Invalid access!")((Kind == k_VectorIndex && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 592, __PRETTY_FUNCTION__))
;
593 return VectorIndex.Val;
594 }
595
596 StringRef getSysReg() const {
597 assert(Kind == k_SysReg && "Invalid access!")((Kind == k_SysReg && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 597, __PRETTY_FUNCTION__))
;
598 return StringRef(SysReg.Data, SysReg.Length);
599 }
600
601 unsigned getSysCR() const {
602 assert(Kind == k_SysCR && "Invalid access!")((Kind == k_SysCR && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 602, __PRETTY_FUNCTION__))
;
603 return SysCRImm.Val;
604 }
605
606 unsigned getPrefetch() const {
607 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 607, __PRETTY_FUNCTION__))
;
608 return Prefetch.Val;
609 }
610
611 unsigned getPSBHint() const {
612 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 612, __PRETTY_FUNCTION__))
;
613 return PSBHint.Val;
614 }
615
616 StringRef getPSBHintName() const {
617 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 617, __PRETTY_FUNCTION__))
;
618 return StringRef(PSBHint.Data, PSBHint.Length);
619 }
620
621 unsigned getBTIHint() const {
622 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 622, __PRETTY_FUNCTION__))
;
623 return BTIHint.Val;
624 }
625
626 StringRef getBTIHintName() const {
627 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 627, __PRETTY_FUNCTION__))
;
628 return StringRef(BTIHint.Data, BTIHint.Length);
629 }
630
631 StringRef getPrefetchName() const {
632 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 632, __PRETTY_FUNCTION__))
;
633 return StringRef(Prefetch.Data, Prefetch.Length);
634 }
635
// Shift/extend kind of this operand. Both standalone shift-extend operands
// (k_ShiftExtend) and registers carrying an attached shift/extend
// (k_Register) are supported; any other kind is a programming error.
636 AArch64_AM::ShiftExtendType getShiftExtendType() const {
637 if (Kind == k_ShiftExtend)
638 return ShiftExtend.Type;
639 if (Kind == k_Register)
640 return Reg.ShiftExtend.Type;
641 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 641)
;
642 }
643
// Shift/extend amount; mirrors getShiftExtendType() in which operand kinds
// it accepts (k_ShiftExtend or k_Register with attached shift/extend).
644 unsigned getShiftExtendAmount() const {
645 if (Kind == k_ShiftExtend)
646 return ShiftExtend.Amount;
647 if (Kind == k_Register)
648 return Reg.ShiftExtend.Amount;
649 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 649)
;
650 }
651
// True if the user wrote an explicit shift/extend amount (e.g. "uxtw #2"
// rather than bare "uxtw"); same kind constraints as getShiftExtendType().
652 bool hasShiftExtendAmount() const {
653 if (Kind == k_ShiftExtend)
654 return ShiftExtend.HasExplicitAmount;
655 if (Kind == k_Register)
656 return Reg.ShiftExtend.HasExplicitAmount;
657 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 657)
;
658 }
659
// MCParsedAsmOperand overrides. isMem is unconditionally false here —
// presumably AArch64 address operands are modeled as register + immediate
// pieces rather than a dedicated memory kind (TODO confirm against callers).
660 bool isImm() const override { return Kind == k_Immediate; }
661 bool isMem() const override { return false; }
662
// True for a constant immediate in the unsigned 6-bit range [0, 63].
// Non-constant expressions do not match.
663 bool isUImm6() const {
664 if (!isImm())
665 return false;
666 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
667 if (!MCE)
668 return false;
669 int64_t Val = MCE->getValue();
670 return (Val >= 0 && Val < 64);
671 }
672
// Thin wrappers over isImmScaled: signed N-bit immediate (scale 1), and
// signed/unsigned N-bit immediates that must be a multiple of Scale.
673 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
674
675 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
676 return isImmScaled<Bits, Scale>(true);
677 }
678
679 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
680 return isImmScaled<Bits, Scale>(false);
681 }
682
// Matches a constant immediate representable as a (signed or unsigned)
// Bits-wide value multiplied by Scale. Returns NoMatch for non-constants,
// Match when in range and a multiple of Scale, otherwise NearMatch so the
// matcher can emit a targeted "out of range" diagnostic.
683 template <int Bits, int Scale>
684 DiagnosticPredicate isImmScaled(bool Signed) const {
685 if (!isImm())
686 return DiagnosticPredicateTy::NoMatch;
687
688 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
689 if (!MCE)
690 return DiagnosticPredicateTy::NoMatch;
691
692 int64_t MinVal, MaxVal;
693 if (Signed) {
694 int64_t Shift = Bits - 1;
695 MinVal = (int64_t(1) << Shift) * -Scale;
696 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
697 } else {
698 MinVal = 0;
699 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
700 }
701
702 int64_t Val = MCE->getValue();
703 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
704 return DiagnosticPredicateTy::Match;
705
706 return DiagnosticPredicateTy::NearMatch;
707 }
708
// SVE predicate-pattern operand: a constant in [0, 31]. Constants outside
// that range yield NearMatch (better diagnostic than plain NoMatch).
709 DiagnosticPredicate isSVEPattern() const {
710 if (!isImm())
711 return DiagnosticPredicateTy::NoMatch;
712 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
713 if (!MCE)
714 return DiagnosticPredicateTy::NoMatch;
715 int64_t Val = MCE->getValue();
716 if (Val >= 0 && Val < 32)
717 return DiagnosticPredicateTy::Match;
718 return DiagnosticPredicateTy::NearMatch;
719 }
720
// Decides whether a symbolic (non-constant) expression is acceptable as a
// uimm12 load/store offset. Unclassifiable expressions are optimistically
// accepted and left to fixup/relocation processing; recognized :lo12:-style
// modifiers are accepted unconditionally, while Darwin @gotpageoff /
// @tlvppageoff are accepted only without an addend.
721 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
722 AArch64MCExpr::VariantKind ELFRefKind;
723 MCSymbolRefExpr::VariantKind DarwinRefKind;
724 int64_t Addend;
725 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
726 Addend)) {
727 // If we don't understand the expression, assume the best and
728 // let the fixup and relocation code deal with it.
729 return true;
730 }
731
732 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
733 ELFRefKind == AArch64MCExpr::VK_LO12 ||
734 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
735 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
736 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
737 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
738 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
739 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
740 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
741 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
742 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
743 // Note that we don't range-check the addend. It's adjusted modulo page
744 // size when converted, so there is no "out of range" condition when using
745 // @pageoff.
746 return true;
747 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
748 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
749 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
750 return Addend == 0;
751 }
752
753 return false;
754 }
755
// uimm12 scaled load/store offset: a constant multiple of Scale whose
// scaled value fits in 12 bits. Symbolic expressions defer to
// isSymbolicUImm12Offset above.
756 template <int Scale> bool isUImm12Offset() const {
757 if (!isImm())
758 return false;
759
760 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
761 if (!MCE)
762 return isSymbolicUImm12Offset(getImm());
763
764 int64_t Val = MCE->getValue();
765 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
766 }
767
// Constant immediate in the inclusive range [N, M].
768 template <int N, int M>
769 bool isImmInRange() const {
770 if (!isImm())
771 return false;
772 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
773 if (!MCE)
774 return false;
775 int64_t Val = MCE->getValue();
776 return (Val >= N && Val <= M);
777 }
778
779 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
780 // a logical immediate can always be represented when inverted.
// Checks whether the constant is encodable as an AArch64 bitmask ("logical")
// immediate at the element width of T. Upper masks off the bits above
// sizeof(T)*8; the double shift avoids UB from a single 64-bit shift when
// T is 64 bits wide. (NB: this listing is a preprocessed analyzer dump —
// "UINT64_C(-1)-1UL" is the macro spelling fused with its expansion.)
781 template <typename T>
782 bool isLogicalImm() const {
783 if (!isImm())
784 return false;
785 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
786 if (!MCE)
787 return false;
788
789 int64_t Val = MCE->getValue();
790 // Avoid left shift by 64 directly.
791 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
792 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
793 if ((Val & Upper) && (Val & Upper) != Upper)
794 return false;
795
796 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
797 }
798
799 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
800
801 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
802 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
803 /// immediate that can be shifted by 'Shift'.
/// An unshifted constant whose low Width bits are all zero (and which is
/// non-zero) is canonicalized to (Val >> Width, Width); anything else stays
/// at shift 0. Returns None for non-constant or non-matching operands.
804 template <unsigned Width>
805 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
806 if (isShiftedImm() && Width == getShiftedImmShift())
807 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
808 return std::make_pair(CE->getValue(), Width);
809
810 if (isImm())
811 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
812 int64_t Val = CE->getValue();
813 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
814 return std::make_pair(Val >> Width, Width);
815 else
816 return std::make_pair(Val, 0u);
817 }
818
819 return {};
820 }
821
// Matches the immediate operand of ADD/SUB: either a symbolic reference with
// a page-offset-style modifier, a constant expressible as uimm12 optionally
// shifted left by 12, or (optimistically) any other expression left to the
// fixup machinery.
822 bool isAddSubImm() const {
823 if (!isShiftedImm() && !isImm())
824 return false;
825
826 const MCExpr *Expr;
827
828 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
829 if (isShiftedImm()) {
830 unsigned Shift = ShiftedImm.ShiftAmount;
831 Expr = ShiftedImm.Val;
832 if (Shift != 0 && Shift != 12)
833 return false;
834 } else {
835 Expr = getImm();
836 }
837
838 AArch64MCExpr::VariantKind ELFRefKind;
839 MCSymbolRefExpr::VariantKind DarwinRefKind;
840 int64_t Addend;
841 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
842 DarwinRefKind, Addend)) {
843 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
844 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
845 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
846 || ELFRefKind == AArch64MCExpr::VK_LO12
847 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
848 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
849 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
850 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
851 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
852 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
853 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
854 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
855 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
856 }
857
858 // If it's a constant, it should be a real immediate in range.
859 if (auto ShiftedVal = getShiftedVal<12>())
860 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
861
862 // If it's an expression, we hope for the best and let the fixup/relocation
863 // code deal with it.
864 return true;
865 }
866
// Negated counterpart of isAddSubImm: a negative constant whose magnitude
// fits uimm12 (optionally shifted), used to flip ADD<->SUB. Symbolic
// expressions never match here.
867 bool isAddSubImmNeg() const {
868 if (!isShiftedImm() && !isImm())
869 return false;
870
871 // Otherwise it should be a real negative immediate in range.
872 if (auto ShiftedVal = getShiftedVal<12>())
873 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
874
875 return false;
876 }
877
878 // Signed value in the range -128 to +127. For element widths of
879 // 16 bits or higher it may also be a signed multiple of 256 in the
880 // range -32768 to +32512.
881 // For element-width of 8 bits a range of -128 to 255 is accepted,
882 // since a copy of a byte can be either signed/unsigned.
// The "IsByte && shift" rejection disallows the lsl #8 form for 8-bit
// elements, where a shifted encoding makes no sense.
883 template <typename T>
884 DiagnosticPredicate isSVECpyImm() const {
885 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
886 return DiagnosticPredicateTy::NoMatch;
887
888 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
889 std::is_same<int8_t, T>::value;
890 if (auto ShiftedImm = getShiftedVal<8>())
891 if (!(IsByte && ShiftedImm->second) &&
892 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
893 << ShiftedImm->second))
894 return DiagnosticPredicateTy::Match;
895
896 return DiagnosticPredicateTy::NearMatch;
897 }
898
899 // Unsigned value in the range 0 to 255. For element widths of
900 // 16 bits or higher it may also be a signed multiple of 256 in the
901 // range 0 to 65280.
// Structure mirrors isSVECpyImm above; the byte case likewise refuses the
// shifted (lsl #8) encoding.
902 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
903 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
904 return DiagnosticPredicateTy::NoMatch;
905
906 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
907 std::is_same<int8_t, T>::value;
908 if (auto ShiftedImm = getShiftedVal<8>())
909 if (!(IsByte && ShiftedImm->second) &&
910 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
911 << ShiftedImm->second))
912 return DiagnosticPredicateTy::Match;
913
914 return DiagnosticPredicateTy::NearMatch;
915 }
916
// Logical immediates that are NOT also CPY immediates — used to prefer the
// CPY/DUP encoding when both alternatives could match.
917 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
918 if (isLogicalImm<T>() && !isSVECpyImm<T>())
919 return DiagnosticPredicateTy::Match;
920 return DiagnosticPredicateTy::NoMatch;
921 }
922
923 bool isCondCode() const { return Kind == k_CondCode; }
924
// Constant immediate encodable as AdvSIMD modified-immediate "type 10"
// (the per-byte 0x00/0xff pattern used by e.g. MOVI ...2d).
925 bool isSIMDImmType10() const {
926 if (!isImm())
927 return false;
928 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
929 if (!MCE)
930 return false;
931 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
932 }
933
// Branch target with an N-bit signed, 4-byte-aligned displacement. Symbolic
// targets are optimistically accepted (resolved by fixups); constants must
// be word-aligned and fit in N bits after the implicit <<2 scaling.
934 template<int N>
935 bool isBranchTarget() const {
936 if (!isImm())
937 return false;
938 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
939 if (!MCE)
940 return true;
941 int64_t Val = MCE->getValue();
942 if (Val & 0x3)
943 return false;
944 assert(N > 0 && "Branch target immediate cannot be 0 bits!")((N > 0 && "Branch target immediate cannot be 0 bits!"
) ? static_cast<void> (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 944, __PRETTY_FUNCTION__))
;
945 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
946 }
947
// Shared matcher for MOVZ/MOVK symbolic operands: the expression must
// classify as an ELF reference (no Darwin modifier) whose variant kind is
// one of the caller-supplied :abs_gN:-style modifiers.
948 bool
949 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
950 if (!isImm())
951 return false;
952
953 AArch64MCExpr::VariantKind ELFRefKind;
954 MCSymbolRefExpr::VariantKind DarwinRefKind;
955 int64_t Addend;
956 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
957 DarwinRefKind, Addend)) {
958 return false;
959 }
960 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
961 return false;
962
963 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
964 if (ELFRefKind == AllowedModifiers[i])
965 return true;
966 }
967
968 return false;
969 }
970
// Per-halfword MOVZ/MOVK modifier groups: G3 covers bits [63:48], G2
// [47:32], G1 [31:16], G0 [15:0]; each lists the absolute, PC-relative and
// TLS variants valid at that position.
971 bool isMovWSymbolG3() const {
972 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
973 }
974
975 bool isMovWSymbolG2() const {
976 return isMovWSymbol(
977 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
978 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
979 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
980 AArch64MCExpr::VK_DTPREL_G2});
981 }
982
983 bool isMovWSymbolG1() const {
984 return isMovWSymbol(
985 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
986 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
987 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
988 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
989 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
990 }
991
992 bool isMovWSymbolG0() const {
993 return isMovWSymbol(
994 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
995 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
996 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
997 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
998 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
999 }
1000
// "mov Rd, #imm" alias for MOVZ: constant immediates defer to the helper;
// non-constant expressions are accepted only when Shift == 0.
1001 template<int RegWidth, int Shift>
1002 bool isMOVZMovAlias() const {
1003 if (!isImm()) return false;
1004
1005 const MCExpr *E = getImm();
1006 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1007 uint64_t Value = CE->getValue();
1008
1009 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1010 }
1011 // Only supports the case of Shift being 0 if an expression is used as an
1012 // operand
1013 return !Shift && E;
1014 }
1015
// "mov Rd, #imm" alias for MOVN; unlike the MOVZ alias, only constants are
// accepted (no symbolic fallback).
1016 template<int RegWidth, int Shift>
1017 bool isMOVNMovAlias() const {
1018 if (!isImm()) return false;
1019
1020 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1021 if (!CE) return false;
1022 uint64_t Value = CE->getValue();
1023
1024 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1025 }
1026
// FP immediate encodable in the 8-bit FMOV immediate format
// (getFP64Imm returns -1 for unencodable values).
1027 bool isFPImm() const {
1028 return Kind == k_FPImm &&
1029 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1030 }
1031
// Simple kind predicates for barrier (DMB/DSB/ISB) and system-register
// operands.
1032 bool isBarrier() const { return Kind == k_Barrier; }
1033 bool isSysReg() const { return Kind == k_SysReg; }
1034
// A system register valid for MRS (read) resp. MSR (write); the lookup
// stored -1U in MRSReg/MSRReg when the name was not valid in that direction.
1035 bool isMRSSystemRegister() const {
1036 if (!isSysReg()) return false;
1037
1038 return SysReg.MRSReg != -1U;
1039 }
1040
1041 bool isMSRSystemRegister() const {
1042 if (!isSysReg()) return false;
1043 return SysReg.MSRReg != -1U;
1044 }
1045
// PSTATE fields taking a 1-bit immediate (PAN/DIT/UAO/SSBS); every other
// known PSTATE field takes a 4-bit immediate — the two predicates are
// deliberately mutually exclusive.
1046 bool isSystemPStateFieldWithImm0_1() const {
1047 if (!isSysReg()) return false;
1048 return (SysReg.PStateField == AArch64PState::PAN ||
1049 SysReg.PStateField == AArch64PState::DIT ||
1050 SysReg.PStateField == AArch64PState::UAO ||
1051 SysReg.PStateField == AArch64PState::SSBS);
1052 }
1053
1054 bool isSystemPStateFieldWithImm0_15() const {
1055 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1056 return SysReg.PStateField != -1U;
1057 }
1058
// Register-kind predicates: any register, a scalar (GPR/FPR) register, or a
// NEON vector register.
1059 bool isReg() const override {
1060 return Kind == k_Register;
1061 }
1062
1063 bool isScalarReg() const {
1064 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1065 }
1066
1067 bool isNeonVectorReg() const {
1068 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1069 }
1070
// NEON vector register restricted to the "lo" half of the file (V0-V15),
// as required by certain indexed-element instruction forms.
1071 bool isNeonVectorRegLo() const {
1072 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1073 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1074 Reg.RegNum) ||
1075 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1076 Reg.RegNum));
1077 }
1078
// SVE register predicate parameterized by register class: ZPR* classes map
// to data vectors, PPR* classes to predicate vectors; the register must
// both have the matching kind and belong to the class.
1079 template <unsigned Class> bool isSVEVectorReg() const {
1080 RegKind RK;
1081 switch (Class) {
1082 case AArch64::ZPRRegClassID:
1083 case AArch64::ZPR_3bRegClassID:
1084 case AArch64::ZPR_4bRegClassID:
1085 RK = RegKind::SVEDataVector;
1086 break;
1087 case AArch64::PPRRegClassID:
1088 case AArch64::PPR_3bRegClassID:
1089 RK = RegKind::SVEPredicateVector;
1090 break;
1091 default:
1092 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1092)
;
1093 }
1094
1095 return (Kind == k_Register && Reg.Kind == RK) &&
1096 AArch64MCRegisterClasses[Class].contains(getReg());
1097 }
1098
// A scalar FP register written where a Z register is expected (the
// FPR-as-ZPR aliasing accepted by some SVE syntax).
1099 template <unsigned Class> bool isFPRasZPR() const {
1100 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1101 AArch64MCRegisterClasses[Class].contains(getReg());
1102 }
1103
// SVE predicate / data vector with a specific element width. A register of
// the right kind but wrong class or width yields NearMatch so the matcher
// can report "invalid element size" instead of a generic error.
1104 template <int ElementWidth, unsigned Class>
1105 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1106 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1107 return DiagnosticPredicateTy::NoMatch;
1108
1109 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1110 return DiagnosticPredicateTy::Match;
1111
1112 return DiagnosticPredicateTy::NearMatch;
1113 }
1114
1115 template <int ElementWidth, unsigned Class>
1116 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1117 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1118 return DiagnosticPredicateTy::NoMatch;
1119
1120 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1121 return DiagnosticPredicateTy::Match;
1122
1123 return DiagnosticPredicateTy::NearMatch;
1124 }
1125
// SVE data vector whose attached shift/extend matches the addressing mode:
// the amount must equal log2(ShiftWidth/8) and the extend type must equal
// ShiftExtendTy exactly.
1126 template <int ElementWidth, unsigned Class,
1127 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1128 bool ShiftWidthAlwaysSame>
1129 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1130 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1131 if (!VectorMatch.isMatch())
1132 return DiagnosticPredicateTy::NoMatch;
1133
1134 // Give a more specific diagnostic when the user has explicitly typed in
1135 // a shift-amount that does not match what is expected, but for which
1136 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1137 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1138 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1139 ShiftExtendTy == AArch64_AM::SXTW) &&
1140 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1141 return DiagnosticPredicateTy::NoMatch;
1142
1143 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1144 return DiagnosticPredicateTy::Match;
1145
1146 return DiagnosticPredicateTy::NearMatch;
1147 }
1148
// Scalar-register class predicates: a 64-bit GPR written where a 32-bit one
// is expected (and vice versa), and the even/odd sequential register pairs
// used by CASP and similar instructions.
1149 bool isGPR32as64() const {
1150 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1151 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1152 }
1153
1154 bool isGPR64as32() const {
1155 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1156 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1157 }
1158
1159 bool isWSeqPair() const {
1160 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1161 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1162 Reg.RegNum);
1163 }
1164
1165 bool isXSeqPair() const {
1166 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1167 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1168 Reg.RegNum);
1169 }
1170
// Rotation operand of complex-arithmetic instructions (FCADD/FCMLA): a
// constant in [0, 270] congruent to Remainder modulo Angle.
1171 template<int64_t Angle, int64_t Remainder>
1172 DiagnosticPredicate isComplexRotation() const {
1173 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1174
1175 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1176 if (!CE) return DiagnosticPredicateTy::NoMatch;
1177 uint64_t Value = CE->getValue();
1178
1179 if (Value % Angle == Remainder && Value <= 270)
1180 return DiagnosticPredicateTy::Match;
1181 return DiagnosticPredicateTy::NearMatch;
1182 }
1183
// 64-bit GPR in a specific class, optionally with an attached
// "lsl #log2(ExtWidth/8)" scaling as used in scaled register offsets.
1184 template <unsigned RegClassID> bool isGPR64() const {
1185 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1186 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1187 }
1188
1189 template <unsigned RegClassID, int ExtWidth>
1190 DiagnosticPredicate isGPR64WithShiftExtend() const {
1191 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1192 return DiagnosticPredicateTy::NoMatch;
1193
1194 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1195 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1196 return DiagnosticPredicateTy::Match;
1197 return DiagnosticPredicateTy::NearMatch;
1198 }
1199
1200 /// Is this a vector list with the type implicit (presumably attached to the
1201 /// instruction itself)?
// Implicit lists carry NumElements == 0; typed lists must match register
// count, register kind, element width and element count exactly.
1202 template <RegKind VectorKind, unsigned NumRegs>
1203 bool isImplicitlyTypedVectorList() const {
1204 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1205 VectorList.NumElements == 0 &&
1206 VectorList.RegisterKind == VectorKind;
1207 }
1208
1209 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1210 unsigned ElementWidth>
1211 bool isTypedVectorList() const {
1212 if (Kind != k_VectorList)
1213 return false;
1214 if (VectorList.Count != NumRegs)
1215 return false;
1216 if (VectorList.RegisterKind != VectorKind)
1217 return false;
1218 if (VectorList.ElementWidth != ElementWidth)
1219 return false;
1220 return VectorList.NumElements == NumElements;
1221 }
1222
// Vector lane index within the inclusive range [Min, Max]; out-of-range
// indices give NearMatch for a better diagnostic.
1223 template <int Min, int Max>
1224 DiagnosticPredicate isVectorIndex() const {
1225 if (Kind != k_VectorIndex)
1226 return DiagnosticPredicateTy::NoMatch;
1227 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1228 return DiagnosticPredicateTy::Match;
1229 return DiagnosticPredicateTy::NearMatch;
1230 }
1231
// Remaining simple kind predicates: raw token (optionally compared against
// a specific spelling), system control register (sys/sysl CRn/CRm),
// prefetch, PSB/BTI hint, and shift/extend operands.
1232 bool isToken() const override { return Kind == k_Token; }
1233
1234 bool isTokenEqual(StringRef Str) const {
1235 return Kind == k_Token && getToken() == Str;
1236 }
1237 bool isSysCR() const { return Kind == k_SysCR; }
1238 bool isPrefetch() const { return Kind == k_Prefetch; }
1239 bool isPSBHint() const { return Kind == k_PSBHint; }
1240 bool isBTIHint() const { return Kind == k_BTIHint; }
1241 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// A plain shifter: one of the shift (not extend) operations, including MSL.
1242 bool isShifter() const {
1243 if (!isShiftExtend())
1244 return false;
1245
1246 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1247 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1248 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1249 ST == AArch64_AM::MSL);
1250 }
1251
// FP immediate that must bit-exactly equal the value named by ImmEnum
// (e.g. 0.5 for FTSSEL-style encodings). The table value is re-parsed into
// an APFloat and compared bitwise, so only exactly-representable literals
// match; inexact parses are a table bug, hence the unreachable.
1252 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1253 if (Kind != k_FPImm)
1254 return DiagnosticPredicateTy::NoMatch;
1255
1256 if (getFPImmIsExact()) {
1257 // Lookup the immediate from table of supported immediates.
1258 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1259 assert(Desc && "Unknown enum value")((Desc && "Unknown enum value") ? static_cast<void
> (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1259, __PRETTY_FUNCTION__))
;
1260
1261 // Calculate its FP value.
1262 APFloat RealVal(APFloat::IEEEdouble());
1263 auto StatusOrErr =
1264 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1265 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1266 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1266)
;
1267
1268 if (getFPImm().bitwiseIsEqual(RealVal))
1269 return DiagnosticPredicateTy::Match;
1270 }
1271
1272 return DiagnosticPredicateTy::NearMatch;
1273 }
1274
// Two-alternative form: matches if either candidate immediate matches;
// otherwise returns the second predicate's result (NoMatch or NearMatch).
1275 template <unsigned ImmA, unsigned ImmB>
1276 DiagnosticPredicate isExactFPImm() const {
1277 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1278 if ((Res = isExactFPImm<ImmA>()))
1279 return DiagnosticPredicateTy::Match;
1280 if ((Res = isExactFPImm<ImmB>()))
1281 return DiagnosticPredicateTy::Match;
1282 return Res;
1283 }
1284
// An extend operand: any UXT*/SXT* form, or LSL (which in extend position
// is an alias for UXTX/UXTW), with an amount of at most 4.
1285 bool isExtend() const {
1286 if (!isShiftExtend())
1287 return false;
1288
1289 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1290 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1291 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1292 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1293 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1294 ET == AArch64_AM::LSL) &&
1295 getShiftExtendAmount() <= 4;
1296 }
1297
1298 bool isExtend64() const {
1299 if (!isExtend())
1300 return false;
1301 // Make sure the extend expects a 32-bit source register.
1302 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1303 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1304 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1305 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1306 }
1307
// Extend forms taking a 64-bit source register: UXTX/SXTX, or the LSL alias.
1308 bool isExtendLSL64() const {
1309 if (!isExtend())
1310 return false;
1311 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1312 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1313 ET == AArch64_AM::LSL) &&
1314 getShiftExtendAmount() <= 4;
1315 }
1316
// Register-offset memory extends for X (LSL/SXTX) and W (UXTW/SXTW) index
// registers: the amount must be 0 (unscaled) or log2(Width/8) (scaled by
// the access size).
1317 template<int Width> bool isMemXExtend() const {
1318 if (!isExtend())
1319 return false;
1320 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1321 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1322 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1323 getShiftExtendAmount() == 0);
1324 }
1325
1326 template<int Width> bool isMemWExtend() const {
1327 if (!isExtend())
1328 return false;
1329 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1330 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1331 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1332 getShiftExtendAmount() == 0);
1333 }
1334
// Shifter classes for data-processing instructions, parameterized by the
// register width that bounds the shift amount.
1335 template <unsigned width>
1336 bool isArithmeticShifter() const {
1337 if (!isShifter())
1338 return false;
1339
1340 // An arithmetic shifter is LSL, LSR, or ASR.
1341 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1342 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1343 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1344 }
1345
1346 template <unsigned width>
1347 bool isLogicalShifter() const {
1348 if (!isShifter())
1349 return false;
1350
1351 // A logical shifter is LSL, LSR, ASR or ROR.
1352 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1353 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1354 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1355 getShiftExtendAmount() < width;
1356 }
1357
// Shifter for 32-bit MOVZ/MOVN/MOVK immediates.
1358 bool isMovImm32Shifter() const {
1359 if (!isShifter())
1360 return false;
1361
1362 // A 32-bit MOVi shifter is LSL of 0 or 16. (Comment previously described
1362 // the 64-bit set {0,16,32,48}; it was swapped with isMovImm64Shifter's.)
1363 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1364 if (ST != AArch64_AM::LSL)
1365 return false;
1366 uint64_t Val = getShiftExtendAmount();
1367 return (Val == 0 || Val == 16);
1368 }
1369
// Shifter for 64-bit MOVZ/MOVN/MOVK immediates.
1370 bool isMovImm64Shifter() const {
1371 if (!isShifter())
1372 return false;
1373
1374 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48. (Comment previously
1374 // said "0 or 16"; it was swapped with isMovImm32Shifter's.)
1375 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1376 if (ST != AArch64_AM::LSL)
1377 return false;
1378 uint64_t Val = getShiftExtendAmount();
1379 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1380 }
1381
// Shifter for vector logical modified-immediates (e.g. ORR Vd.4s, #imm).
1382 bool isLogicalVecShifter() const {
1383 if (!isShifter())
1384 return false;
1385
1386 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1387 unsigned Shift = getShiftExtendAmount();
1388 return getShiftExtendType() == AArch64_AM::LSL &&
1389 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1390 }
1391
// Half-word variant of the above (16-bit elements): only LSL #0 or #8.
1392 bool isLogicalVecHalfWordShifter() const {
1393 if (!isLogicalVecShifter())
1394 return false;
1395
1396 // A logical vector half-word shifter is a left shift by 0 or 8.
1397 unsigned Shift = getShiftExtendAmount();
1398 return getShiftExtendType() == AArch64_AM::LSL &&
1399 (Shift == 0 || Shift == 8);
1400 }
1401
// Shifter for MOVI/MVNI "shifting ones" forms — note this is MSL, not LSL.
1402 bool isMoveVecShifter() const {
1403 if (!isShiftExtend())
1404 return false;
1405
1406 // A move vector shifter is an MSL shift by 8 or 16. (Comment previously
1406 // mislabeled this as a "logical vector shifter".)
1407 unsigned Shift = getShiftExtendAmount();
1408 return getShiftExtendType() == AArch64_AM::MSL &&
1409 (Shift == 8 || Shift == 16);
1410 }
1411
1412 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1413 // to LDUR/STUR when the offset is not legal for the former but is for
1414 // the latter. As such, in addition to checking for being a legal unscaled
1415 // address, also check that it is not a legal scaled address. This avoids
1416 // ambiguity in the matcher.
1417 template<int Width>
1418 bool isSImm9OffsetFB() const {
1419 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1420 }
1421
// ADRP label: constant displacements must be page-aligned and within the
// signed 21-bit page range (+/- 4GB); symbolic labels were validated at
// parse time and are accepted here.
1422 bool isAdrpLabel() const {
1423 // Validation was handled during parsing, so we just sanity check that
1424 // something didn't go haywire.
1425 if (!isImm())
1426 return false;
1427
1428 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1429 int64_t Val = CE->getValue();
1430 int64_t Min = - (4096 * (1LL << (21 - 1)));
1431 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1432 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1433 }
1434
1435 return true;
1436 }
1437
// ADR label: same structure as isAdrpLabel but byte-granular, so the
// constant range is the plain signed 21-bit window with no alignment check.
1438 bool isAdrLabel() const {
1439 // Validation was handled during parsing, so we just sanity check that
1440 // something didn't go haywire.
1441 if (!isImm())
1442 return false;
1443
1444 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1445 int64_t Val = CE->getValue();
1446 int64_t Min = - (1LL << (21 - 1));
1447 int64_t Max = ((1LL << (21 - 1)) - 1);
1448 return Val >= Min && Val <= Max;
1449 }
1450
1451 return true;
1452 }
1453
// Appends Expr to Inst, folding constants into immediate operands; a null
// expression is treated as immediate 0.
1454 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1455 // Add as immediates when possible. Null MCExpr = 0.
1456 if (!Expr)
1457 Inst.addOperand(MCOperand::createImm(0));
1458 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1459 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1460 else
1461 Inst.addOperand(MCOperand::createExpr(Expr));
1462 }
1463
// Emits this operand's register into Inst unchanged (N is the operand
// count the matcher expects — always 1 here).
1464 void addRegOperands(MCInst &Inst, unsigned N) const {
1465 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1465, __PRETTY_FUNCTION__))
;
1466 Inst.addOperand(MCOperand::createReg(getReg()));
1467 }
1468
// Converts the held 64-bit GPR (Xn) to its 32-bit counterpart (Wn) by
// looking up the same encoding value in the GPR32 class, then emits it.
1469 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1470 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1470, __PRETTY_FUNCTION__))
;
1471 assert(((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1472, __PRETTY_FUNCTION__))
1472 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1472, __PRETTY_FUNCTION__))
;
1473
1474 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1475 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1476 RI->getEncodingValue(getReg()));
1477
1478 Inst.addOperand(MCOperand::createReg(Reg));
1479 }
1480
// Mirror of addGPR32as64Operands: promotes the held 32-bit GPR (Wn) to the
// same-numbered 64-bit register (Xn) before emitting.
1481 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1482 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1482, __PRETTY_FUNCTION__))
;
1483 assert(((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1484, __PRETTY_FUNCTION__))
1484 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1484, __PRETTY_FUNCTION__))
;
1485
1486 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1487 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1488 RI->getEncodingValue(getReg()));
1489
1490 Inst.addOperand(MCOperand::createReg(Reg));
1491 }
1492
1493 template <int Width>
1494 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1495 unsigned Base;
1496 switch (Width) {
1497 case 8: Base = AArch64::B0; break;
1498 case 16: Base = AArch64::H0; break;
1499 case 32: Base = AArch64::S0; break;
1500 case 64: Base = AArch64::D0; break;
1501 case 128: Base = AArch64::Q0; break;
1502 default:
1503 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1503)
;
1504 }
1505 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1506 }
1507
1508 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1509 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1509, __PRETTY_FUNCTION__))
;
1510 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1511, __PRETTY_FUNCTION__))
1511 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1511, __PRETTY_FUNCTION__))
;
1512 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1513 }
1514
1515 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1516 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1516, __PRETTY_FUNCTION__))
;
1517 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1518, __PRETTY_FUNCTION__))
1518 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1518, __PRETTY_FUNCTION__))
;
1519 Inst.addOperand(MCOperand::createReg(getReg()));
1520 }
1521
1522 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1523 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1523, __PRETTY_FUNCTION__))
;
1524 Inst.addOperand(MCOperand::createReg(getReg()));
1525 }
1526
// Selects which register bank a vector list indexes into (see the
// FirstRegs table in addVectorListOperands).
enum VecListIndexType {
  VecListIdx_DReg = 0,
  VecListIdx_QReg = 1,
  VecListIdx_ZReg = 2,
};
1532
1533 template <VecListIndexType RegTy, unsigned NumRegs>
1534 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1535 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1535, __PRETTY_FUNCTION__))
;
1536 static const unsigned FirstRegs[][5] = {
1537 /* DReg */ { AArch64::Q0,
1538 AArch64::D0, AArch64::D0_D1,
1539 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1540 /* QReg */ { AArch64::Q0,
1541 AArch64::Q0, AArch64::Q0_Q1,
1542 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1543 /* ZReg */ { AArch64::Z0,
1544 AArch64::Z0, AArch64::Z0_Z1,
1545 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1546 };
1547
1548 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1549, __PRETTY_FUNCTION__))
1549 " NumRegs must be <= 4 for ZRegs")(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1549, __PRETTY_FUNCTION__))
;
1550
1551 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1552 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1553 FirstRegs[(unsigned)RegTy][0]));
1554 }
1555
1556 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1557 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1557, __PRETTY_FUNCTION__))
;
1558 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1559 }
1560
1561 template <unsigned ImmIs0, unsigned ImmIs1>
1562 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1563 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1563, __PRETTY_FUNCTION__))
;
1564 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")((bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand"
) ? static_cast<void> (0) : __assert_fail ("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1564, __PRETTY_FUNCTION__))
;
1565 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1566 }
1567
1568 void addImmOperands(MCInst &Inst, unsigned N) const {
1569 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1569, __PRETTY_FUNCTION__))
;
1570 // If this is a pageoff symrefexpr with an addend, adjust the addend
1571 // to be only the page-offset portion. Otherwise, just add the expr
1572 // as-is.
1573 addExpr(Inst, getImm());
1574 }
1575
1576 template <int Shift>
1577 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1578 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1578, __PRETTY_FUNCTION__))
;
1579 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1580 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1581 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1582 } else if (isShiftedImm()) {
1583 addExpr(Inst, getShiftedImmVal());
1584 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1585 } else {
1586 addExpr(Inst, getImm());
1587 Inst.addOperand(MCOperand::createImm(0));
1588 }
1589 }
1590
1591 template <int Shift>
1592 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1593 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1593, __PRETTY_FUNCTION__))
;
1594 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1595 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1596 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1597 } else
1598 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1598)
;
1599 }
1600
1601 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1602 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1602, __PRETTY_FUNCTION__))
;
1603 Inst.addOperand(MCOperand::createImm(getCondCode()));
1604 }
1605
1606 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1607 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1607, __PRETTY_FUNCTION__))
;
1608 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1609 if (!MCE)
1610 addExpr(Inst, getImm());
1611 else
1612 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1613 }
1614
1615 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1616 addImmOperands(Inst, N);
1617 }
1618
1619 template<int Scale>
1620 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1621 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1621, __PRETTY_FUNCTION__))
;
1622 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1623
1624 if (!MCE) {
1625 Inst.addOperand(MCOperand::createExpr(getImm()));
1626 return;
1627 }
1628 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1629 }
1630
1631 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1632 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1632, __PRETTY_FUNCTION__))
;
1633 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1634 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1635 }
1636
1637 template <int Scale>
1638 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1639 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1639, __PRETTY_FUNCTION__))
;
1640 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1641 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1642 }
1643
1644 template <typename T>
1645 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1646 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1646, __PRETTY_FUNCTION__))
;
1647 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1648 std::make_unsigned_t<T> Val = MCE->getValue();
1649 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1650 Inst.addOperand(MCOperand::createImm(encoding));
1651 }
1652
1653 template <typename T>
1654 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1655 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1655, __PRETTY_FUNCTION__))
;
1656 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1657 std::make_unsigned_t<T> Val = ~MCE->getValue();
1658 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1659 Inst.addOperand(MCOperand::createImm(encoding));
1660 }
1661
1662 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1663 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1663, __PRETTY_FUNCTION__))
;
1664 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1665 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1666 Inst.addOperand(MCOperand::createImm(encoding));
1667 }
1668
1669 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1670 // Branch operands don't encode the low bits, so shift them off
1671 // here. If it's a label, however, just put it on directly as there's
1672 // not enough information now to do anything.
1673 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1673, __PRETTY_FUNCTION__))
;
1674 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1675 if (!MCE) {
1676 addExpr(Inst, getImm());
1677 return;
1678 }
1679 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1679, __PRETTY_FUNCTION__))
;
1680 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1681 }
1682
1683 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1684 // Branch operands don't encode the low bits, so shift them off
1685 // here. If it's a label, however, just put it on directly as there's
1686 // not enough information now to do anything.
1687 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1687, __PRETTY_FUNCTION__))
;
1688 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1689 if (!MCE) {
1690 addExpr(Inst, getImm());
1691 return;
1692 }
1693 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1693, __PRETTY_FUNCTION__))
;
1694 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1695 }
1696
1697 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1698 // Branch operands don't encode the low bits, so shift them off
1699 // here. If it's a label, however, just put it on directly as there's
1700 // not enough information now to do anything.
1701 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1701, __PRETTY_FUNCTION__))
;
1702 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1703 if (!MCE) {
1704 addExpr(Inst, getImm());
1705 return;
1706 }
1707 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1707, __PRETTY_FUNCTION__))
;
1708 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1709 }
1710
1711 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1712 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1712, __PRETTY_FUNCTION__))
;
1713 Inst.addOperand(MCOperand::createImm(
1714 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1715 }
1716
1717 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1718 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1718, __PRETTY_FUNCTION__))
;
1719 Inst.addOperand(MCOperand::createImm(getBarrier()));
1720 }
1721
1722 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1723 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1723, __PRETTY_FUNCTION__))
;
1724
1725 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1726 }
1727
1728 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1729 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1729, __PRETTY_FUNCTION__))
;
1730
1731 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1732 }
1733
1734 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1735 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1735, __PRETTY_FUNCTION__))
;
1736
1737 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1738 }
1739
1740 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1741 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1741, __PRETTY_FUNCTION__))
;
1742
1743 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1744 }
1745
1746 void addSysCROperands(MCInst &Inst, unsigned N) const {
1747 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1747, __PRETTY_FUNCTION__))
;
1748 Inst.addOperand(MCOperand::createImm(getSysCR()));
1749 }
1750
1751 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1752 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1752, __PRETTY_FUNCTION__))
;
1753 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1754 }
1755
1756 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1757 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1757, __PRETTY_FUNCTION__))
;
1758 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1759 }
1760
1761 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1762 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1762, __PRETTY_FUNCTION__))
;
1763 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1764 }
1765
1766 void addShifterOperands(MCInst &Inst, unsigned N) const {
1767 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1767, __PRETTY_FUNCTION__))
;
1768 unsigned Imm =
1769 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1770 Inst.addOperand(MCOperand::createImm(Imm));
1771 }
1772
1773 void addExtendOperands(MCInst &Inst, unsigned N) const {
1774 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1774, __PRETTY_FUNCTION__))
;
1775 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1776 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1777 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1778 Inst.addOperand(MCOperand::createImm(Imm));
1779 }
1780
1781 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1782 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1782, __PRETTY_FUNCTION__))
;
1783 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1784 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1785 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1786 Inst.addOperand(MCOperand::createImm(Imm));
1787 }
1788
1789 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1790 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1790, __PRETTY_FUNCTION__))
;
1791 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1792 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1793 Inst.addOperand(MCOperand::createImm(IsSigned));
1794 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1795 }
1796
1797 // For 8-bit load/store instructions with a register offset, both the
1798 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1799 // they're disambiguated by whether the shift was explicit or implicit rather
1800 // than its size.
1801 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1802 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1802, __PRETTY_FUNCTION__))
;
1803 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1804 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1805 Inst.addOperand(MCOperand::createImm(IsSigned));
1806 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1807 }
1808
1809 template<int Shift>
1810 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1811 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1811, __PRETTY_FUNCTION__))
;
1812
1813 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1814 if (CE) {
1815 uint64_t Value = CE->getValue();
1816 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1817 } else {
1818 addExpr(Inst, getImm());
1819 }
1820 }
1821
1822 template<int Shift>
1823 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1824 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1824, __PRETTY_FUNCTION__))
;
1825
1826 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1827 uint64_t Value = CE->getValue();
1828 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1829 }
1830
1831 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1832 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1832, __PRETTY_FUNCTION__))
;
1833 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1834 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1835 }
1836
1837 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1838 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1838, __PRETTY_FUNCTION__))
;
1839 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1840 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1841 }
1842
1843 void print(raw_ostream &OS) const override;
1844
1845 static std::unique_ptr<AArch64Operand>
1846 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1847 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1848 Op->Tok.Data = Str.data();
1849 Op->Tok.Length = Str.size();
1850 Op->Tok.IsSuffix = IsSuffix;
1851 Op->StartLoc = S;
1852 Op->EndLoc = S;
1853 return Op;
1854 }
1855
1856 static std::unique_ptr<AArch64Operand>
1857 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1858 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1859 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1860 unsigned ShiftAmount = 0,
1861 unsigned HasExplicitAmount = false) {
1862 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1863 Op->Reg.RegNum = RegNum;
1864 Op->Reg.Kind = Kind;
1865 Op->Reg.ElementWidth = 0;
1866 Op->Reg.EqualityTy = EqTy;
1867 Op->Reg.ShiftExtend.Type = ExtTy;
1868 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1869 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1870 Op->StartLoc = S;
1871 Op->EndLoc = E;
1872 return Op;
1873 }
1874
1875 static std::unique_ptr<AArch64Operand>
1876 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1877 SMLoc S, SMLoc E, MCContext &Ctx,
1878 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1879 unsigned ShiftAmount = 0,
1880 unsigned HasExplicitAmount = false) {
1881 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1883, __PRETTY_FUNCTION__))
1882 Kind == RegKind::SVEPredicateVector) &&(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1883, __PRETTY_FUNCTION__))
1883 "Invalid vector kind")(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1883, __PRETTY_FUNCTION__))
;
1884 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1885 HasExplicitAmount);
1886 Op->Reg.ElementWidth = ElementWidth;
1887 return Op;
1888 }
1889
1890 static std::unique_ptr<AArch64Operand>
1891 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1892 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1893 MCContext &Ctx) {
1894 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1895 Op->VectorList.RegNum = RegNum;
1896 Op->VectorList.Count = Count;
1897 Op->VectorList.NumElements = NumElements;
1898 Op->VectorList.ElementWidth = ElementWidth;
1899 Op->VectorList.RegisterKind = RegisterKind;
1900 Op->StartLoc = S;
1901 Op->EndLoc = E;
1902 return Op;
1903 }
1904
1905 static std::unique_ptr<AArch64Operand>
1906 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1907 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1908 Op->VectorIndex.Val = Idx;
1909 Op->StartLoc = S;
1910 Op->EndLoc = E;
1911 return Op;
1912 }
1913
1914 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1915 SMLoc E, MCContext &Ctx) {
1916 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1917 Op->Imm.Val = Val;
1918 Op->StartLoc = S;
1919 Op->EndLoc = E;
1920 return Op;
1921 }
1922
1923 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1924 unsigned ShiftAmount,
1925 SMLoc S, SMLoc E,
1926 MCContext &Ctx) {
1927 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1928 Op->ShiftedImm .Val = Val;
1929 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1930 Op->StartLoc = S;
1931 Op->EndLoc = E;
1932 return Op;
1933 }
1934
1935 static std::unique_ptr<AArch64Operand>
1936 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1937 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
1938 Op->CondCode.Code = Code;
1939 Op->StartLoc = S;
1940 Op->EndLoc = E;
1941 return Op;
1942 }
1943
1944 static std::unique_ptr<AArch64Operand>
1945 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1946 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
1947 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1948 Op->FPImm.IsExact = IsExact;
1949 Op->StartLoc = S;
1950 Op->EndLoc = S;
1951 return Op;
1952 }
1953
1954 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1955 StringRef Str,
1956 SMLoc S,
1957 MCContext &Ctx) {
1958 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
1959 Op->Barrier.Val = Val;
1960 Op->Barrier.Data = Str.data();
1961 Op->Barrier.Length = Str.size();
1962 Op->StartLoc = S;
1963 Op->EndLoc = S;
1964 return Op;
1965 }
1966
1967 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1968 uint32_t MRSReg,
1969 uint32_t MSRReg,
1970 uint32_t PStateField,
1971 MCContext &Ctx) {
1972 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
1973 Op->SysReg.Data = Str.data();
1974 Op->SysReg.Length = Str.size();
1975 Op->SysReg.MRSReg = MRSReg;
1976 Op->SysReg.MSRReg = MSRReg;
1977 Op->SysReg.PStateField = PStateField;
1978 Op->StartLoc = S;
1979 Op->EndLoc = S;
1980 return Op;
1981 }
1982
1983 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1984 SMLoc E, MCContext &Ctx) {
1985 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
1986 Op->SysCRImm.Val = Val;
1987 Op->StartLoc = S;
1988 Op->EndLoc = E;
1989 return Op;
1990 }
1991
1992 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1993 StringRef Str,
1994 SMLoc S,
1995 MCContext &Ctx) {
1996 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
1997 Op->Prefetch.Val = Val;
1998 Op->Barrier.Data = Str.data();
1999 Op->Barrier.Length = Str.size();
2000 Op->StartLoc = S;
2001 Op->EndLoc = S;
2002 return Op;
2003 }
2004
2005 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2006 StringRef Str,
2007 SMLoc S,
2008 MCContext &Ctx) {
2009 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2010 Op->PSBHint.Val = Val;
2011 Op->PSBHint.Data = Str.data();
2012 Op->PSBHint.Length = Str.size();
2013 Op->StartLoc = S;
2014 Op->EndLoc = S;
2015 return Op;
2016 }
2017
2018 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2019 StringRef Str,
2020 SMLoc S,
2021 MCContext &Ctx) {
2022 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2023 Op->BTIHint.Val = Val << 1 | 32;
2024 Op->BTIHint.Data = Str.data();
2025 Op->BTIHint.Length = Str.size();
2026 Op->StartLoc = S;
2027 Op->EndLoc = S;
2028 return Op;
2029 }
2030
2031 static std::unique_ptr<AArch64Operand>
2032 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2033 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2034 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2035 Op->ShiftExtend.Type = ShOp;
2036 Op->ShiftExtend.Amount = Val;
2037 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2038 Op->StartLoc = S;
2039 Op->EndLoc = E;
2040 return Op;
2041 }
2042};
2043
2044} // end anonymous namespace.
2045
2046void AArch64Operand::print(raw_ostream &OS) const {
2047 switch (Kind) {
2048 case k_FPImm:
2049 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2050 if (!getFPImmIsExact())
2051 OS << " (inexact)";
2052 OS << ">";
2053 break;
2054 case k_Barrier: {
2055 StringRef Name = getBarrierName();
2056 if (!Name.empty())
2057 OS << "<barrier " << Name << ">";
2058 else
2059 OS << "<barrier invalid #" << getBarrier() << ">";
2060 break;
2061 }
2062 case k_Immediate:
2063 OS << *getImm();
2064 break;
2065 case k_ShiftedImm: {
2066 unsigned Shift = getShiftedImmShift();
2067 OS << "<shiftedimm ";
2068 OS << *getShiftedImmVal();
2069 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2070 break;
2071 }
2072 case k_CondCode:
2073 OS << "<condcode " << getCondCode() << ">";
2074 break;
2075 case k_VectorList: {
2076 OS << "<vectorlist ";
2077 unsigned Reg = getVectorListStart();
2078 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2079 OS << Reg + i << " ";
2080 OS << ">";
2081 break;
2082 }
2083 case k_VectorIndex:
2084 OS << "<vectorindex " << getVectorIndex() << ">";
2085 break;
2086 case k_SysReg:
2087 OS << "<sysreg: " << getSysReg() << '>';
2088 break;
2089 case k_Token:
2090 OS << "'" << getToken() << "'";
2091 break;
2092 case k_SysCR:
2093 OS << "c" << getSysCR();
2094 break;
2095 case k_Prefetch: {
2096 StringRef Name = getPrefetchName();
2097 if (!Name.empty())
2098 OS << "<prfop " << Name << ">";
2099 else
2100 OS << "<prfop invalid #" << getPrefetch() << ">";
2101 break;
2102 }
2103 case k_PSBHint:
2104 OS << getPSBHintName();
2105 break;
2106 case k_BTIHint:
2107 OS << getBTIHintName();
2108 break;
2109 case k_Register:
2110 OS << "<register " << getReg() << ">";
2111 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2112 break;
2113 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2114 case k_ShiftExtend:
2115 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2116 << getShiftExtendAmount();
2117 if (!hasShiftExtendAmount())
2118 OS << "<imp>";
2119 OS << '>';
2120 break;
2121 }
2122}
2123
2124/// @name Auto-generated Match Functions
2125/// {
2126
2127static unsigned MatchRegisterName(StringRef Name);
2128
2129/// }
2130
2131static unsigned MatchNeonVectorRegName(StringRef Name) {
2132 return StringSwitch<unsigned>(Name.lower())
2133 .Case("v0", AArch64::Q0)
2134 .Case("v1", AArch64::Q1)
2135 .Case("v2", AArch64::Q2)
2136 .Case("v3", AArch64::Q3)
2137 .Case("v4", AArch64::Q4)
2138 .Case("v5", AArch64::Q5)
2139 .Case("v6", AArch64::Q6)
2140 .Case("v7", AArch64::Q7)
2141 .Case("v8", AArch64::Q8)
2142 .Case("v9", AArch64::Q9)
2143 .Case("v10", AArch64::Q10)
2144 .Case("v11", AArch64::Q11)
2145 .Case("v12", AArch64::Q12)
2146 .Case("v13", AArch64::Q13)
2147 .Case("v14", AArch64::Q14)
2148 .Case("v15", AArch64::Q15)
2149 .Case("v16", AArch64::Q16)
2150 .Case("v17", AArch64::Q17)
2151 .Case("v18", AArch64::Q18)
2152 .Case("v19", AArch64::Q19)
2153 .Case("v20", AArch64::Q20)
2154 .Case("v21", AArch64::Q21)
2155 .Case("v22", AArch64::Q22)
2156 .Case("v23", AArch64::Q23)
2157 .Case("v24", AArch64::Q24)
2158 .Case("v25", AArch64::Q25)
2159 .Case("v26", AArch64::Q26)
2160 .Case("v27", AArch64::Q27)
2161 .Case("v28", AArch64::Q28)
2162 .Case("v29", AArch64::Q29)
2163 .Case("v30", AArch64::Q30)
2164 .Case("v31", AArch64::Q31)
2165 .Default(0);
2166}
2167
2168/// Returns an optional pair of (#elements, element-width) if Suffix
2169/// is a valid vector kind. Where the number of elements in a vector
2170/// or the vector width is implicit or explicitly unknown (but still a
2171/// valid suffix kind), 0 is used.
2172static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2173 RegKind VectorKind) {
2174 std::pair<int, int> Res = {-1, -1};
2175
2176 switch (VectorKind) {
2177 case RegKind::NeonVector:
2178 Res =
2179 StringSwitch<std::pair<int, int>>(Suffix.lower())
2180 .Case("", {0, 0})
2181 .Case(".1d", {1, 64})
2182 .Case(".1q", {1, 128})
2183 // '.2h' needed for fp16 scalar pairwise reductions
2184 .Case(".2h", {2, 16})
2185 .Case(".2s", {2, 32})
2186 .Case(".2d", {2, 64})
2187 // '.4b' is another special case for the ARMv8.2a dot product
2188 // operand
2189 .Case(".4b", {4, 8})
2190 .Case(".4h", {4, 16})
2191 .Case(".4s", {4, 32})
2192 .Case(".8b", {8, 8})
2193 .Case(".8h", {8, 16})
2194 .Case(".16b", {16, 8})
2195 // Accept the width neutral ones, too, for verbose syntax. If those
2196 // aren't used in the right places, the token operand won't match so
2197 // all will work out.
2198 .Case(".b", {0, 8})
2199 .Case(".h", {0, 16})
2200 .Case(".s", {0, 32})
2201 .Case(".d", {0, 64})
2202 .Default({-1, -1});
2203 break;
2204 case RegKind::SVEPredicateVector:
2205 case RegKind::SVEDataVector:
2206 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2207 .Case("", {0, 0})
2208 .Case(".b", {0, 8})
2209 .Case(".h", {0, 16})
2210 .Case(".s", {0, 32})
2211 .Case(".d", {0, 64})
2212 .Case(".q", {0, 128})
2213 .Default({-1, -1});
2214 break;
2215 default:
2216 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2216)
;
2217 }
2218
2219 if (Res == std::make_pair(-1, -1))
2220 return Optional<std::pair<int, int>>();
2221
2222 return Optional<std::pair<int, int>>(Res);
2223}
2224
2225static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2226 return parseVectorKind(Suffix, VectorKind).hasValue();
2227}
2228
2229static unsigned matchSVEDataVectorRegName(StringRef Name) {
2230 return StringSwitch<unsigned>(Name.lower())
2231 .Case("z0", AArch64::Z0)
2232 .Case("z1", AArch64::Z1)
2233 .Case("z2", AArch64::Z2)
2234 .Case("z3", AArch64::Z3)
2235 .Case("z4", AArch64::Z4)
2236 .Case("z5", AArch64::Z5)
2237 .Case("z6", AArch64::Z6)
2238 .Case("z7", AArch64::Z7)
2239 .Case("z8", AArch64::Z8)
2240 .Case("z9", AArch64::Z9)
2241 .Case("z10", AArch64::Z10)
2242 .Case("z11", AArch64::Z11)
2243 .Case("z12", AArch64::Z12)
2244 .Case("z13", AArch64::Z13)
2245 .Case("z14", AArch64::Z14)
2246 .Case("z15", AArch64::Z15)
2247 .Case("z16", AArch64::Z16)
2248 .Case("z17", AArch64::Z17)
2249 .Case("z18", AArch64::Z18)
2250 .Case("z19", AArch64::Z19)
2251 .Case("z20", AArch64::Z20)
2252 .Case("z21", AArch64::Z21)
2253 .Case("z22", AArch64::Z22)
2254 .Case("z23", AArch64::Z23)
2255 .Case("z24", AArch64::Z24)
2256 .Case("z25", AArch64::Z25)
2257 .Case("z26", AArch64::Z26)
2258 .Case("z27", AArch64::Z27)
2259 .Case("z28", AArch64::Z28)
2260 .Case("z29", AArch64::Z29)
2261 .Case("z30", AArch64::Z30)
2262 .Case("z31", AArch64::Z31)
2263 .Default(0);
2264}
2265
2266static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2267 return StringSwitch<unsigned>(Name.lower())
2268 .Case("p0", AArch64::P0)
2269 .Case("p1", AArch64::P1)
2270 .Case("p2", AArch64::P2)
2271 .Case("p3", AArch64::P3)
2272 .Case("p4", AArch64::P4)
2273 .Case("p5", AArch64::P5)
2274 .Case("p6", AArch64::P6)
2275 .Case("p7", AArch64::P7)
2276 .Case("p8", AArch64::P8)
2277 .Case("p9", AArch64::P9)
2278 .Case("p10", AArch64::P10)
2279 .Case("p11", AArch64::P11)
2280 .Case("p12", AArch64::P12)
2281 .Case("p13", AArch64::P13)
2282 .Case("p14", AArch64::P14)
2283 .Case("p15", AArch64::P15)
2284 .Default(0);
2285}
2286
2287bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2288 SMLoc &EndLoc) {
2289 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
26
Calling 'AArch64AsmParser::tryParseRegister'
32
Returning from 'AArch64AsmParser::tryParseRegister'
33
Returning without writing to 'RegNo'
2290}
2291
2292OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2293 SMLoc &StartLoc,
2294 SMLoc &EndLoc) {
2295 StartLoc = getLoc();
2296 auto Res = tryParseScalarRegister(RegNo);
27
Calling 'AArch64AsmParser::tryParseScalarRegister'
30
Returning from 'AArch64AsmParser::tryParseScalarRegister'
2297 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2298 return Res;
31
Returning without writing to 'RegNo'
2299}
2300
2301// Matches a register name or register alias previously defined by '.req'
2302unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2303 RegKind Kind) {
2304 unsigned RegNum = 0;
2305 if ((RegNum = matchSVEDataVectorRegName(Name)))
2306 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2307
2308 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2309 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2310
2311 if ((RegNum = MatchNeonVectorRegName(Name)))
2312 return Kind == RegKind::NeonVector ? RegNum : 0;
2313
2314 // The parsed register must be of RegKind Scalar
2315 if ((RegNum = MatchRegisterName(Name)))
2316 return Kind == RegKind::Scalar ? RegNum : 0;
2317
2318 if (!RegNum) {
2319 // Handle a few common aliases of registers.
2320 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2321 .Case("fp", AArch64::FP)
2322 .Case("lr", AArch64::LR)
2323 .Case("x31", AArch64::XZR)
2324 .Case("w31", AArch64::WZR)
2325 .Default(0))
2326 return Kind == RegKind::Scalar ? RegNum : 0;
2327
2328 // Check for aliases registered via .req. Canonicalize to lower case.
2329 // That's more consistent since register names are case insensitive, and
2330 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2331 auto Entry = RegisterReqs.find(Name.lower());
2332 if (Entry == RegisterReqs.end())
2333 return 0;
2334
2335 // set RegNum if the match is the right kind of register
2336 if (Kind == Entry->getValue().first)
2337 RegNum = Entry->getValue().second;
2338 }
2339 return RegNum;
2340}
2341
2342/// tryParseScalarRegister - Try to parse a register name. The token must be an
2343/// Identifier when called, and if it is a register name the token is eaten and
2344/// the register is added to the operand list.
2345OperandMatchResultTy
2346AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2347 MCAsmParser &Parser = getParser();
2348 const AsmToken &Tok = Parser.getTok();
2349 if (Tok.isNot(AsmToken::Identifier))
28
Taking true branch
2350 return MatchOperand_NoMatch;
29
Returning without writing to 'RegNum'
2351
2352 std::string lowerCase = Tok.getString().lower();
2353 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2354 if (Reg == 0)
2355 return MatchOperand_NoMatch;
2356
2357 RegNum = Reg;
2358 Parser.Lex(); // Eat identifier token.
2359 return MatchOperand_Success;
2360}
2361
2362/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2363OperandMatchResultTy
2364AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2365 MCAsmParser &Parser = getParser();
2366 SMLoc S = getLoc();
2367
2368 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2369 Error(S, "Expected cN operand where 0 <= N <= 15");
2370 return MatchOperand_ParseFail;
2371 }
2372
2373 StringRef Tok = Parser.getTok().getIdentifier();
2374 if (Tok[0] != 'c' && Tok[0] != 'C') {
2375 Error(S, "Expected cN operand where 0 <= N <= 15");
2376 return MatchOperand_ParseFail;
2377 }
2378
2379 uint32_t CRNum;
2380 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2381 if (BadNum || CRNum > 15) {
2382 Error(S, "Expected cN operand where 0 <= N <= 15");
2383 return MatchOperand_ParseFail;
2384 }
2385
2386 Parser.Lex(); // Eat identifier token.
2387 Operands.push_back(
2388 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2389 return MatchOperand_Success;
2390}
2391
2392/// tryParsePrefetch - Try to parse a prefetch operand.
2393template <bool IsSVEPrefetch>
2394OperandMatchResultTy
2395AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2396 MCAsmParser &Parser = getParser();
2397 SMLoc S = getLoc();
2398 const AsmToken &Tok = Parser.getTok();
2399
2400 auto LookupByName = [](StringRef N) {
2401 if (IsSVEPrefetch) {
2402 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2403 return Optional<unsigned>(Res->Encoding);
2404 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2405 return Optional<unsigned>(Res->Encoding);
2406 return Optional<unsigned>();
2407 };
2408
2409 auto LookupByEncoding = [](unsigned E) {
2410 if (IsSVEPrefetch) {
2411 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2412 return Optional<StringRef>(Res->Name);
2413 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2414 return Optional<StringRef>(Res->Name);
2415 return Optional<StringRef>();
2416 };
2417 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2418
2419 // Either an identifier for named values or a 5-bit immediate.
2420 // Eat optional hash.
2421 if (parseOptionalToken(AsmToken::Hash) ||
2422 Tok.is(AsmToken::Integer)) {
2423 const MCExpr *ImmVal;
2424 if (getParser().parseExpression(ImmVal))
2425 return MatchOperand_ParseFail;
2426
2427 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2428 if (!MCE) {
2429 TokError("immediate value expected for prefetch operand");
2430 return MatchOperand_ParseFail;
2431 }
2432 unsigned prfop = MCE->getValue();
2433 if (prfop > MaxVal) {
2434 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2435 "] expected");
2436 return MatchOperand_ParseFail;
2437 }
2438
2439 auto PRFM = LookupByEncoding(MCE->getValue());
2440 Operands.push_back(AArch64Operand::CreatePrefetch(
2441 prfop, PRFM.getValueOr(""), S, getContext()));
2442 return MatchOperand_Success;
2443 }
2444
2445 if (Tok.isNot(AsmToken::Identifier)) {
2446 TokError("prefetch hint expected");
2447 return MatchOperand_ParseFail;
2448 }
2449
2450 auto PRFM = LookupByName(Tok.getString());
2451 if (!PRFM) {
2452 TokError("prefetch hint expected");
2453 return MatchOperand_ParseFail;
2454 }
2455
2456 Operands.push_back(AArch64Operand::CreatePrefetch(
2457 *PRFM, Tok.getString(), S, getContext()));
2458 Parser.Lex(); // Eat identifier token.
2459 return MatchOperand_Success;
2460}
2461
2462/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2463OperandMatchResultTy
2464AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2465 MCAsmParser &Parser = getParser();
2466 SMLoc S = getLoc();
2467 const AsmToken &Tok = Parser.getTok();
2468 if (Tok.isNot(AsmToken::Identifier)) {
2469 TokError("invalid operand for instruction");
2470 return MatchOperand_ParseFail;
2471 }
2472
2473 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2474 if (!PSB) {
2475 TokError("invalid operand for instruction");
2476 return MatchOperand_ParseFail;
2477 }
2478
2479 Operands.push_back(AArch64Operand::CreatePSBHint(
2480 PSB->Encoding, Tok.getString(), S, getContext()));
2481 Parser.Lex(); // Eat identifier token.
2482 return MatchOperand_Success;
2483}
2484
2485/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2486OperandMatchResultTy
2487AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2488 MCAsmParser &Parser = getParser();
2489 SMLoc S = getLoc();
2490 const AsmToken &Tok = Parser.getTok();
2491 if (Tok.isNot(AsmToken::Identifier)) {
2492 TokError("invalid operand for instruction");
2493 return MatchOperand_ParseFail;
2494 }
2495
2496 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2497 if (!BTI) {
2498 TokError("invalid operand for instruction");
2499 return MatchOperand_ParseFail;
2500 }
2501
2502 Operands.push_back(AArch64Operand::CreateBTIHint(
2503 BTI->Encoding, Tok.getString(), S, getContext()));
2504 Parser.Lex(); // Eat identifier token.
2505 return MatchOperand_Success;
2506}
2507
2508/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2509/// instruction.
2510OperandMatchResultTy
2511AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2512 MCAsmParser &Parser = getParser();
2513 SMLoc S = getLoc();
2514 const MCExpr *Expr = nullptr;
2515
2516 if (Parser.getTok().is(AsmToken::Hash)) {
2517 Parser.Lex(); // Eat hash token.
2518 }
2519
2520 if (parseSymbolicImmVal(Expr))
2521 return MatchOperand_ParseFail;
2522
2523 AArch64MCExpr::VariantKind ELFRefKind;
2524 MCSymbolRefExpr::VariantKind DarwinRefKind;
2525 int64_t Addend;
2526 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2527 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2528 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2529 // No modifier was specified at all; this is the syntax for an ELF basic
2530 // ADRP relocation (unfortunately).
2531 Expr =
2532 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2533 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2534 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2535 Addend != 0) {
2536 Error(S, "gotpage label reference not allowed an addend");
2537 return MatchOperand_ParseFail;
2538 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2539 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2540 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2541 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2542 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2543 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2544 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2545 // The operand must be an @page or @gotpage qualified symbolref.
2546 Error(S, "page or gotpage label reference expected");
2547 return MatchOperand_ParseFail;
2548 }
2549 }
2550
2551 // We have either a label reference possibly with addend or an immediate. The
2552 // addend is a raw value here. The linker will adjust it to only reference the
2553 // page.
2554 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2555 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2556
2557 return MatchOperand_Success;
2558}
2559
2560/// tryParseAdrLabel - Parse and validate a source label for the ADR
2561/// instruction.
2562OperandMatchResultTy
2563AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2564 SMLoc S = getLoc();
2565 const MCExpr *Expr = nullptr;
2566
2567 // Leave anything with a bracket to the default for SVE
2568 if (getParser().getTok().is(AsmToken::LBrac))
2569 return MatchOperand_NoMatch;
2570
2571 if (getParser().getTok().is(AsmToken::Hash))
2572 getParser().Lex(); // Eat hash token.
2573
2574 if (parseSymbolicImmVal(Expr))
2575 return MatchOperand_ParseFail;
2576
2577 AArch64MCExpr::VariantKind ELFRefKind;
2578 MCSymbolRefExpr::VariantKind DarwinRefKind;
2579 int64_t Addend;
2580 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2581 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2582 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2583 // No modifier was specified at all; this is the syntax for an ELF basic
2584 // ADR relocation (unfortunately).
2585 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2586 } else {
2587 Error(S, "unexpected adr label");
2588 return MatchOperand_ParseFail;
2589 }
2590 }
2591
2592 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2593 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2594 return MatchOperand_Success;
2595}
2596
2597/// tryParseFPImm - A floating point immediate expression operand.
2598template<bool AddFPZeroAsLiteral>
2599OperandMatchResultTy
2600AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2601 MCAsmParser &Parser = getParser();
2602 SMLoc S = getLoc();
2603
2604 bool Hash = parseOptionalToken(AsmToken::Hash);
2605
2606 // Handle negation, as that still comes through as a separate token.
2607 bool isNegative = parseOptionalToken(AsmToken::Minus);
2608
2609 const AsmToken &Tok = Parser.getTok();
2610 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2611 if (!Hash)
2612 return MatchOperand_NoMatch;
2613 TokError("invalid floating point immediate");
2614 return MatchOperand_ParseFail;
2615 }
2616
2617 // Parse hexadecimal representation.
2618 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2619 if (Tok.getIntVal() > 255 || isNegative) {
2620 TokError("encoded floating point value out of range");
2621 return MatchOperand_ParseFail;
2622 }
2623
2624 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2625 Operands.push_back(
2626 AArch64Operand::CreateFPImm(F, true, S, getContext()));
2627 } else {
2628 // Parse FP representation.
2629 APFloat RealVal(APFloat::IEEEdouble());
2630 auto StatusOrErr =
2631 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2632 if (errorToBool(StatusOrErr.takeError())) {
2633 TokError("invalid floating point representation");
2634 return MatchOperand_ParseFail;
2635 }
2636
2637 if (isNegative)
2638 RealVal.changeSign();
2639
2640 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2641 Operands.push_back(
2642 AArch64Operand::CreateToken("#0", false, S, getContext()));
2643 Operands.push_back(
2644 AArch64Operand::CreateToken(".0", false, S, getContext()));
2645 } else
2646 Operands.push_back(AArch64Operand::CreateFPImm(
2647 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2648 }
2649
2650 Parser.Lex(); // Eat the token.
2651
2652 return MatchOperand_Success;
2653}
2654
2655/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2656/// a shift suffix, for example '#1, lsl #12'.
2657OperandMatchResultTy
2658AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2659 MCAsmParser &Parser = getParser();
2660 SMLoc S = getLoc();
2661
2662 if (Parser.getTok().is(AsmToken::Hash))
2663 Parser.Lex(); // Eat '#'
2664 else if (Parser.getTok().isNot(AsmToken::Integer))
2665 // Operand should start from # or should be integer, emit error otherwise.
2666 return MatchOperand_NoMatch;
2667
2668 const MCExpr *Imm = nullptr;
2669 if (parseSymbolicImmVal(Imm))
2670 return MatchOperand_ParseFail;
2671 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2672 SMLoc E = Parser.getTok().getLoc();
2673 Operands.push_back(
2674 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2675 return MatchOperand_Success;
2676 }
2677
2678 // Eat ','
2679 Parser.Lex();
2680
2681 // The optional operand must be "lsl #N" where N is non-negative.
2682 if (!Parser.getTok().is(AsmToken::Identifier) ||
2683 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2684 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2685 return MatchOperand_ParseFail;
2686 }
2687
2688 // Eat 'lsl'
2689 Parser.Lex();
2690
2691 parseOptionalToken(AsmToken::Hash);
2692
2693 if (Parser.getTok().isNot(AsmToken::Integer)) {
2694 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2695 return MatchOperand_ParseFail;
2696 }
2697
2698 int64_t ShiftAmount = Parser.getTok().getIntVal();
2699
2700 if (ShiftAmount < 0) {
2701 Error(Parser.getTok().getLoc(), "positive shift amount required");
2702 return MatchOperand_ParseFail;
2703 }
2704 Parser.Lex(); // Eat the number
2705
2706 // Just in case the optional lsl #0 is used for immediates other than zero.
2707 if (ShiftAmount == 0 && Imm != nullptr) {
2708 SMLoc E = Parser.getTok().getLoc();
2709 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2710 return MatchOperand_Success;
2711 }
2712
2713 SMLoc E = Parser.getTok().getLoc();
2714 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2715 S, E, getContext()));
2716 return MatchOperand_Success;
2717}
2718
2719/// parseCondCodeString - Parse a Condition Code string.
2720AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2721 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2722 .Case("eq", AArch64CC::EQ)
2723 .Case("ne", AArch64CC::NE)
2724 .Case("cs", AArch64CC::HS)
2725 .Case("hs", AArch64CC::HS)
2726 .Case("cc", AArch64CC::LO)
2727 .Case("lo", AArch64CC::LO)
2728 .Case("mi", AArch64CC::MI)
2729 .Case("pl", AArch64CC::PL)
2730 .Case("vs", AArch64CC::VS)
2731 .Case("vc", AArch64CC::VC)
2732 .Case("hi", AArch64CC::HI)
2733 .Case("ls", AArch64CC::LS)
2734 .Case("ge", AArch64CC::GE)
2735 .Case("lt", AArch64CC::LT)
2736 .Case("gt", AArch64CC::GT)
2737 .Case("le", AArch64CC::LE)
2738 .Case("al", AArch64CC::AL)
2739 .Case("nv", AArch64CC::NV)
2740 .Default(AArch64CC::Invalid);
2741
2742 if (CC == AArch64CC::Invalid &&
2743 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2744 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2745 .Case("none", AArch64CC::EQ)
2746 .Case("any", AArch64CC::NE)
2747 .Case("nlast", AArch64CC::HS)
2748 .Case("last", AArch64CC::LO)
2749 .Case("first", AArch64CC::MI)
2750 .Case("nfrst", AArch64CC::PL)
2751 .Case("pmore", AArch64CC::HI)
2752 .Case("plast", AArch64CC::LS)
2753 .Case("tcont", AArch64CC::GE)
2754 .Case("tstop", AArch64CC::LT)
2755 .Default(AArch64CC::Invalid);
2756
2757 return CC;
2758}
2759
2760/// parseCondCode - Parse a Condition Code operand.
2761bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2762 bool invertCondCode) {
2763 MCAsmParser &Parser = getParser();
2764 SMLoc S = getLoc();
2765 const AsmToken &Tok = Parser.getTok();
2766 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")((Tok.is(AsmToken::Identifier) && "Token is not an Identifier"
) ? static_cast<void> (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2766, __PRETTY_FUNCTION__))
;
2767
2768 StringRef Cond = Tok.getString();
2769 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2770 if (CC == AArch64CC::Invalid)
2771 return TokError("invalid condition code");
2772 Parser.Lex(); // Eat identifier token.
2773
2774 if (invertCondCode) {
2775 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2776 return TokError("condition codes AL and NV are invalid for this instruction");
2777 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2778 }
2779
2780 Operands.push_back(
2781 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2782 return false;
2783}
2784
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument. Parse it if present; returns MatchOperand_NoMatch (consuming no
/// tokens) when the current token is not a shift or extend mnemonic, so the
/// caller can try other operand forms.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // Shift/extend mnemonics are matched case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  // The amount may be written either "#imm" or a bare "imm".
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // Shift operators require an explicit amount; only the "extend" family
    // may omit it.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to an absolute constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2858
// Table mapping architecture-extension names (as written by the user, e.g.
// in ".arch_extension" directives) to the subtarget feature bits they imply.
// Entries with an empty feature set are recognized but not yet supported.
static const struct Extension {
  const char *Name;               // User-visible extension name.
  const FeatureBitset Features;   // Feature bits enabled by this extension.
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},   // Alias of "mte".
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2893
2894static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2895 if (FBS[AArch64::HasV8_1aOps])
2896 Str += "ARMv8.1a";
2897 else if (FBS[AArch64::HasV8_2aOps])
2898 Str += "ARMv8.2a";
2899 else if (FBS[AArch64::HasV8_3aOps])
2900 Str += "ARMv8.3a";
2901 else if (FBS[AArch64::HasV8_4aOps])
2902 Str += "ARMv8.4a";
2903 else if (FBS[AArch64::HasV8_5aOps])
2904 Str += "ARMv8.5a";
2905 else if (FBS[AArch64::HasV8_6aOps])
2906 Str += "ARMv8.6a";
2907 else {
2908 auto ext = std::find_if(std::begin(ExtensionMap),
2909 std::end(ExtensionMap),
2910 [&](const Extension& e)
2911 // Use & in case multiple features are enabled
2912 { return (FBS & e.Features) != FeatureBitset(); }
2913 );
2914
2915 Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2916 }
2917}
2918
2919void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2920 SMLoc S) {
2921 const uint16_t Op2 = Encoding & 7;
2922 const uint16_t Cm = (Encoding & 0x78) >> 3;
2923 const uint16_t Cn = (Encoding & 0x780) >> 7;
2924 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2925
2926 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2927
2928 Operands.push_back(
2929 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2930 Operands.push_back(
2931 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2932 Operands.push_back(
2933 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2934 Expr = MCConstantExpr::create(Op2, getContext());
2935 Operands.push_back(
2936 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2937}
2938
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Returns true (after reporting an error) on failure, false on success.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  // A '.' in the mnemonic would be a condition-code suffix, which none of
  // these aliases accept.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias lowers to a plain "sys" instruction token.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Each mnemonic family looks up its operation name in its own table and,
  // when the operation needs features the subtarget lacks, builds a
  // diagnostic naming the missing architecture version or extension.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The three prediction-restriction mnemonics share a table entry and
    // differ only in op2, which is supplied here.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Operations whose name contains "all" act on everything and therefore
  // take no register operand; everything else requires one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3036
/// tryParseBarrierOperand - Parse the operand of a barrier instruction:
/// either a "#imm" in [0, 15] or a named barrier option. The only named
/// option accepted for ISB is 'sy' and for TSB is 'csync'.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
    // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // If the immediate corresponds to a named barrier, keep the name for
    // diagnostics/printing; otherwise store an empty name.
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
    // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3093
/// tryParseSysReg - Parse a system-register operand (for MRS/MSR and MSR
/// pstate forms). Named registers are looked up in the SysReg table; anything
/// else falls back to the generic "S<op0>_<op1>_<Cn>_<Cm>_<op2>" parser.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // -1 marks the register as invalid for the corresponding direction
  // (read for MRS, write for MSR).
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  // The identifier may also name a PState field (for "msr pstate, #imm");
  // -1 (wrapped to unsigned) means "not a PState field".
  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
3122
/// tryParseNeonVectorRegister - Parse a vector register operand, e.g.
/// "v0.8b", optionally followed by a lane index "[n]". Returns true on
/// failure (nothing matched or a parse error), false on success.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // The ".8b"-style suffix determines the element width of the operand.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // A trailing "[n]" lane index is optional; only a malformed one fails.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3155
3156OperandMatchResultTy
3157AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3158 SMLoc SIdx = getLoc();
3159 if (parseOptionalToken(AsmToken::LBrac)) {
3160 const MCExpr *ImmVal;
3161 if (getParser().parseExpression(ImmVal))
3162 return MatchOperand_NoMatch;
3163 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3164 if (!MCE) {
3165 TokError("immediate value expected for vector index");
3166 return MatchOperand_ParseFail;;
3167 }
3168
3169 SMLoc E = getLoc();
3170
3171 if (parseToken(AsmToken::RBrac, "']' expected"))
3172 return MatchOperand_ParseFail;;
3173
3174 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3175 E, getContext()));
3176 return MatchOperand_Success;
3177 }
3178
3179 return MatchOperand_NoMatch;
3180}
3181
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it. On success \p Reg receives the register number and
// \p Kind the ".<type>" suffix (empty if none was written).
OperandMatchResultTy
AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                         RegKind MatchKind) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Keep the '.' as part of the kind string (e.g. ".8b").
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind, MatchKind)) {
        TokError("invalid vector kind qualifier");
        return MatchOperand_ParseFail;
      }
    }
    Parser.Lex(); // Eat the register token.

    Reg = RegNum;
    return MatchOperand_Success;
  }

  // Note: Reg and Kind are left untouched when nothing matched.
  return MatchOperand_NoMatch;
}
3217
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// e.g. "p0.b", optionally followed by a "/m" (merging) or "/z" (zeroing)
/// qualifier. The slash and the m/z letter are pushed as separate tokens.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3270
3271/// parseRegister - Parse a register operand.
3272bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3273 // Try for a Neon vector register.
3274 if (!tryParseNeonVectorRegister(Operands))
3275 return false;
3276
3277 // Otherwise try for a scalar register.
3278 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3279 return false;
3280
3281 return true;
3282}
3283
3284bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3285 MCAsmParser &Parser = getParser();
3286 bool HasELFModifier = false;
3287 AArch64MCExpr::VariantKind RefKind;
3288
3289 if (parseOptionalToken(AsmToken::Colon)) {
3290 HasELFModifier = true;
3291
3292 if (Parser.getTok().isNot(AsmToken::Identifier))
3293 return TokError("expect relocation specifier in operand after ':'");
3294
3295 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3296 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3297 .Case("lo12", AArch64MCExpr::VK_LO12)
3298 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3299 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3300 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3301 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3302 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3303 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3304 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3305 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3306 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3307 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3308 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3309 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3310 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3311 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3312 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3313 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3314 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3315 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3316 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3317 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3318 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3319 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3320 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3321 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3322 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3323 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3324 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3325 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3326 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3327 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3328 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3329 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3330 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3331 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3332 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3333 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3334 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3335 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3336 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3337 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3338 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3339 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3340 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3341 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3342 .Default(AArch64MCExpr::VK_INVALID);
3343
3344 if (RefKind == AArch64MCExpr::VK_INVALID)
3345 return TokError("expect relocation specifier in operand after ':'");
3346
3347 Parser.Lex(); // Eat identifier
3348
3349 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3350 return true;
3351 }
3352
3353 if (getParser().parseExpression(ImmVal))
3354 return true;
3355
3356 if (HasELFModifier)
3357 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3358
3359 return false;
3360}
3361
3362template <RegKind VectorKind>
3363OperandMatchResultTy
3364AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3365 bool ExpectMatch) {
3366 MCAsmParser &Parser = getParser();
3367 if (!Parser.getTok().is(AsmToken::LCurly))
3368 return MatchOperand_NoMatch;
3369
3370 // Wrapper around parse function
3371 auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3372 bool NoMatchIsError) {
3373 auto RegTok = Parser.getTok();
3374 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3375 if (ParseRes == MatchOperand_Success) {
3376 if (parseVectorKind(Kind, VectorKind))
3377 return ParseRes;
3378 llvm_unreachable("Expected a valid vector kind")::llvm::llvm_unreachable_internal("Expected a valid vector kind"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3378)
;
3379 }
3380
3381 if (RegTok.isNot(AsmToken::Identifier) ||
3382 ParseRes == MatchOperand_ParseFail ||
3383 (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3384 Error(Loc, "vector register expected");
3385 return MatchOperand_ParseFail;
3386 }
3387
3388 return MatchOperand_NoMatch;
3389 };
3390
3391 SMLoc S = getLoc();
3392 auto LCurly = Parser.getTok();
3393 Parser.Lex(); // Eat left bracket token.
3394
3395 StringRef Kind;
3396 unsigned FirstReg;
3397 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3398
3399 // Put back the original left bracket if there was no match, so that
3400 // different types of list-operands can be matched (e.g. SVE, Neon).
3401 if (ParseRes == MatchOperand_NoMatch)
3402 Parser.getLexer().UnLex(LCurly);
3403
3404 if (ParseRes != MatchOperand_Success)
3405 return ParseRes;
3406
3407 int64_t PrevReg = FirstReg;
3408 unsigned Count = 1;
3409
3410 if (parseOptionalToken(AsmToken::Minus)) {
3411 SMLoc Loc = getLoc();
3412 StringRef NextKind;
3413
3414 unsigned Reg;
3415 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3416 if (ParseRes != MatchOperand_Success)
3417 return ParseRes;
3418
3419 // Any Kind suffices must match on all regs in the list.
3420 if (Kind != NextKind) {
3421 Error(Loc, "mismatched register size suffix");
3422 return MatchOperand_ParseFail;
3423 }
3424
3425 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3426
3427 if (Space == 0 || Space > 3) {
3428 Error(Loc, "invalid number of vectors");
3429 return MatchOperand_ParseFail;
3430 }
3431
3432 Count += Space;
3433 }
3434 else {
3435 while (parseOptionalToken(AsmToken::Comma)) {
3436 SMLoc Loc = getLoc();
3437 StringRef NextKind;
3438 unsigned Reg;
3439 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3440 if (ParseRes != MatchOperand_Success)
3441 return ParseRes;
3442
3443 // Any Kind suffices must match on all regs in the list.
3444 if (Kind != NextKind) {
3445 Error(Loc, "mismatched register size suffix");
3446 return MatchOperand_ParseFail;
3447 }
3448
3449 // Registers must be incremental (with wraparound at 31)
3450 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3451 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3452 Error(Loc, "registers must be sequential");
3453 return MatchOperand_ParseFail;
3454 }
3455
3456 PrevReg = Reg;
3457 ++Count;
3458 }
3459 }
3460
3461 if (parseToken(AsmToken::RCurly, "'}' expected"))
3462 return MatchOperand_ParseFail;
3463
3464 if (Count > 4) {
3465 Error(S, "invalid number of vectors");
3466 return MatchOperand_ParseFail;
3467 }
3468
3469 unsigned NumElements = 0;
3470 unsigned ElementWidth = 0;
3471 if (!Kind.empty()) {
3472 if (const auto &VK = parseVectorKind(Kind, VectorKind))
3473 std::tie(NumElements, ElementWidth) = *VK;
3474 }
3475
3476 Operands.push_back(AArch64Operand::CreateVectorList(
3477 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3478 getContext()));
3479
3480 return MatchOperand_Success;
3481}
3482
3483/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3484bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3485 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3486 if (ParseRes != MatchOperand_Success)
3487 return true;
3488
3489 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3490}
3491
/// tryParseGPR64sp0Operand - Parse a scalar register optionally followed by
/// ", #0" (or ", 0"); any other index value is rejected. Used for operands
/// written as e.g. "x1" or "x1, #0".
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No comma: plain register, no index to check.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // The '#' before the 0 is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getParser().getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  // Only the constant 0 is accepted as an index.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
3525
/// tryParseGPROperand - Parse a scalar (general-purpose) register operand,
/// optionally followed by ", <shift|extend> ..." when \p ParseShiftExtend is
/// true. \p EqTy selects how the register is later compared against other
/// operands during matching.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into a single register operand rather than
  // pushing it separately.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3560
3561bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3562 MCAsmParser &Parser = getParser();
3563
3564 // Some SVE instructions have a decoration after the immediate, i.e.
3565 // "mul vl". We parse them here and add tokens, which must be present in the
3566 // asm string in the tablegen instruction.
3567 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3568 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3569 if (!Parser.getTok().getString().equals_lower("mul") ||
3570 !(NextIsVL || NextIsHash))
3571 return true;
3572
3573 Operands.push_back(
3574 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3575 Parser.Lex(); // Eat the "mul"
3576
3577 if (NextIsVL) {
3578 Operands.push_back(
3579 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3580 Parser.Lex(); // Eat the "vl"
3581 return false;
3582 }
3583
3584 if (NextIsHash) {
3585 Parser.Lex(); // Eat the #
3586 SMLoc S = getLoc();
3587
3588 // Parse immediate operand.
3589 const MCExpr *ImmVal;
3590 if (!Parser.parseExpression(ImmVal))
3591 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3592 Operands.push_back(AArch64Operand::CreateImm(
3593 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3594 getContext()));
3595 return MatchOperand_Success;
3596 }
3597 }
3598
3599 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3600}
3601
3602/// parseOperand - Parse a arm instruction operand. For now this parses the
3603/// operand regardless of the mnemonic.
3604bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3605 bool invertCondCode) {
3606 MCAsmParser &Parser = getParser();
3607
3608 OperandMatchResultTy ResTy =
3609 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3610
3611 // Check if the current operand has a custom associated parser, if so, try to
3612 // custom parse the operand, or fallback to the general approach.
3613 if (ResTy == MatchOperand_Success)
3614 return false;
3615 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3616 // there was a match, but an error occurred, in which case, just return that
3617 // the operand parsing failed.
3618 if (ResTy == MatchOperand_ParseFail)
3619 return true;
3620
3621 // Nothing custom, so do general case parsing.
3622 SMLoc S, E;
3623 switch (getLexer().getKind()) {
3624 default: {
3625 SMLoc S = getLoc();
3626 const MCExpr *Expr;
3627 if (parseSymbolicImmVal(Expr))
3628 return Error(S, "invalid operand");
3629
3630 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3631 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3632 return false;
3633 }
3634 case AsmToken::LBrac: {
3635 SMLoc Loc = Parser.getTok().getLoc();
3636 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3637 getContext()));
3638 Parser.Lex(); // Eat '['
3639
3640 // There's no comma after a '[', so we can parse the next operand
3641 // immediately.
3642 return parseOperand(Operands, false, false);
3643 }
3644 case AsmToken::LCurly:
3645 return parseNeonVectorList(Operands);
3646 case AsmToken::Identifier: {
3647 // If we're expecting a Condition Code operand, then just parse that.
3648 if (isCondCode)
3649 return parseCondCode(Operands, invertCondCode);
3650
3651 // If it's a register name, parse it.
3652 if (!parseRegister(Operands))
3653 return false;
3654
3655 // See if this is a "mul vl" decoration or "mul #<int>" operand used
3656 // by SVE instructions.
3657 if (!parseOptionalMulOperand(Operands))
3658 return false;
3659
3660 // This could be an optional "shift" or "extend" operand.
3661 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3662 // We can only continue if no tokens were eaten.
3663 if (GotShift != MatchOperand_NoMatch)
3664 return GotShift;
3665
3666 // This was not a register so parse other operands that start with an
3667 // identifier (like labels) as expressions and create them as immediates.
3668 const MCExpr *IdVal;
3669 S = getLoc();
3670 if (getParser().parseExpression(IdVal))
3671 return true;
3672 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3673 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3674 return false;
3675 }
3676 case AsmToken::Integer:
3677 case AsmToken::Real:
3678 case AsmToken::Hash: {
3679 // #42 -> immediate.
3680 S = getLoc();
3681
3682 parseOptionalToken(AsmToken::Hash);
3683
3684 // Parse a negative sign
3685 bool isNegative = false;
3686 if (Parser.getTok().is(AsmToken::Minus)) {
3687 isNegative = true;
3688 // We need to consume this token only when we have a Real, otherwise
3689 // we let parseSymbolicImmVal take care of it
3690 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3691 Parser.Lex();
3692 }
3693
3694 // The only Real that should come through here is a literal #0.0 for
3695 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3696 // so convert the value.
3697 const AsmToken &Tok = Parser.getTok();
3698 if (Tok.is(AsmToken::Real)) {
3699 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3700 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3701 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3702 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3703 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3704 return TokError("unexpected floating point literal");
3705 else if (IntVal != 0 || isNegative)
3706 return TokError("expected floating-point constant #0.0");
3707 Parser.Lex(); // Eat the token.
3708
3709 Operands.push_back(
3710 AArch64Operand::CreateToken("#0", false, S, getContext()));
3711 Operands.push_back(
3712 AArch64Operand::CreateToken(".0", false, S, getContext()));
3713 return false;
3714 }
3715
3716 const MCExpr *ImmVal;
3717 if (parseSymbolicImmVal(ImmVal))
3718 return true;
3719
3720 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3721 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3722 return false;
3723 }
3724 case AsmToken::Equal: {
3725 SMLoc Loc = getLoc();
3726 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3727 return TokError("unexpected token in operand");
3728 Parser.Lex(); // Eat '='
3729 const MCExpr *SubExprVal;
3730 if (getParser().parseExpression(SubExprVal))
3731 return true;
3732
3733 if (Operands.size() < 2 ||
3734 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3735 return Error(Loc, "Only valid when first operand is register");
3736
3737 bool IsXReg =
3738 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3739 Operands[1]->getReg());
3740
3741 MCContext& Ctx = getContext();
3742 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3743 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3744 if (isa<MCConstantExpr>(SubExprVal)) {
3745 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3746 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3747 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3748 ShiftAmt += 16;
3749 Imm >>= 16;
3750 }
3751 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3752 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3753 Operands.push_back(AArch64Operand::CreateImm(
3754 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3755 if (ShiftAmt)
3756 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3757 ShiftAmt, true, S, E, Ctx));
3758 return false;
3759 }
3760 APInt Simm = APInt(64, Imm << ShiftAmt);
3761 // check if the immediate is an unsigned or signed 32-bit int for W regs
3762 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3763 return Error(Loc, "Immediate too large for register");
3764 }
3765 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3766 const MCExpr *CPLoc =
3767 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3768 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3769 return false;
3770 }
3771 }
3772}
3773
3774bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
3775 const MCExpr *Expr = nullptr;
3776 SMLoc L = getLoc();
3777 if (check(getParser().parseExpression(Expr), L, "expected expression"))
3778 return true;
3779 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
3780 if (check(!Value, L, "expected constant expression"))
3781 return true;
3782 Out = Value->getValue();
3783 return false;
3784}
3785
3786bool AArch64AsmParser::parseComma() {
3787 if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
3788 "expected comma"))
3789 return true;
3790 // Eat the comma
3791 getParser().Lex();
3792 return false;
3793}
3794
3795bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
3796 unsigned First, unsigned Last) {
3797 unsigned Reg;
24
'Reg' declared without an initial value
3798 SMLoc Start, End;
3799 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
25
Calling 'AArch64AsmParser::ParseRegister'
34
Returning from 'AArch64AsmParser::ParseRegister'
35
Assuming the condition is false
36
Taking false branch
3800 return true;
3801
3802 // Special handling for FP and LR; they aren't linearly after x28 in
3803 // the registers enum.
3804 unsigned RangeEnd = Last;
3805 if (Base
36.1
'Base' is equal to X0
== AArch64::X0) {
37
Taking true branch
3806 if (Last
37.1
'Last' is equal to FP
== AArch64::FP) {
38
Taking true branch
3807 RangeEnd = AArch64::X28;
3808 if (Reg == AArch64::FP) {
39
The left operand of '==' is a garbage value
3809 Out = 29;
3810 return false;
3811 }
3812 }
3813 if (Last == AArch64::LR) {
3814 RangeEnd = AArch64::X28;
3815 if (Reg == AArch64::FP) {
3816 Out = 29;
3817 return false;
3818 } else if (Reg == AArch64::LR) {
3819 Out = 30;
3820 return false;
3821 }
3822 }
3823 }
3824
3825 if (check(Reg < First || Reg > RangeEnd, Start,
3826 Twine("expected register in range ") +
3827 AArch64InstPrinter::getRegisterName(First) + " to " +
3828 AArch64InstPrinter::getRegisterName(Last)))
3829 return true;
3830 Out = Reg - Base;
3831 return false;
3832}
3833
3834bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3835 const MCParsedAsmOperand &Op2) const {
3836 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3837 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3838 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3839 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3840 return MCTargetAsmParser::regsEqual(Op1, Op2);
3841
3842 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3843, __PRETTY_FUNCTION__))
3843 "Testing equality of non-scalar registers not supported")((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3843, __PRETTY_FUNCTION__))
;
3844
3845 // Check if a registers match their sub/super register classes.
3846 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3847 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3848 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3849 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3850 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3851 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3852 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3853 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3854
3855 return false;
3856}
3857
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Pushes the mnemonic (split on '.') and all parsed operands onto
/// \p Operands; returns true on error (diagnostic already emitted).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize condensed conditional-branch spellings (e.g. "beq") into the
  // dotted "b.<cond>" form the matcher expects.
  Name = StringSwitch<StringRef>(Name.lower())
      .Case("beq", "b.eq")
      .Case("bne", "b.ne")
      .Case("bhs", "b.hs")
      .Case("bcs", "b.cs")
      .Case("blo", "b.lo")
      .Case("bcc", "b.cc")
      .Case("bmi", "b.mi")
      .Case("bpl", "b.pl")
      .Case("bvs", "b.vs")
      .Case("bvc", "b.vc")
      .Case("bhi", "b.hi")
      .Case("bls", "b.ls")
      .Case("bge", "b.ge")
      .Case("blt", "b.lt")
      .Case("bgt", "b.gt")
      .Case("ble", "b.le")
      .Case("bal", "b.al")
      .Case("bnv", "b.nv")
      .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic ("b.<cond>[...]"): the
  // fragment after the first '.' is the condition-code suffix.
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic (each keeps its leading '.').
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    // N counts operands starting at 1 so it lines up with the positions the
    // condCode*Operand flags refer to.
    unsigned N = 1;
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      // + An RBrac will end an address for load/store/prefetch
      // + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3993
3994static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3995 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31
)) ? static_cast<void> (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3995, __PRETTY_FUNCTION__))
;
3996 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3997 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3998 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3999 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4000 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4001 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4002}
4003
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Validates an already-matched MCInst: movprfx prefix compatibility rules,
// undefined-behaviour register overlaps (writeback base vs. data registers,
// Rt==Rt2 in pair loads, exclusive-store status overlap), and symbolic
// immediates on ADD/SUB. Returns true (with a diagnostic) if the instruction
// is rejected.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erronously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed intructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                          " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                           " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    // Tied operands are exempt: they are the destructive operand itself.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                             " movprfx and destination also used as non-destructive"
                             " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instructions general predicate.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback pair loads: operand 0 is the writeback result, so the data
    // registers start at operand 1.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // Fall through to also apply the Rt == Rt2 check below.
    LLVM_FALLTHROUGH;
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive stores: status register Rs must not overlap data or base
    // (except when the base is SP).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
                   "unpredictable LDRA instruction, writeback base"
                   " is also a destination");
    break;
  }
  }


  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
4291
4292static std::string AArch64MnemonicSpellCheck(StringRef S,
4293 const FeatureBitset &FBS,
4294 unsigned VariantID = 0);
4295
4296bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4297 uint64_t ErrorInfo,
4298 OperandVector &Operands) {
4299 switch (ErrCode) {
4300 case Match_InvalidTiedOperand: {
4301 RegConstraintEqualityTy EqTy =
4302 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4303 .getRegEqualityTy();
4304 switch (EqTy) {
4305 case RegConstraintEqualityTy::EqualsSubReg:
4306 return Error(Loc, "operand must be 64-bit form of destination register");
4307 case RegConstraintEqualityTy::EqualsSuperReg:
4308 return Error(Loc, "operand must be 32-bit form of destination register");
4309 case RegConstraintEqualityTy::EqualsReg:
4310 return Error(Loc, "operand must match destination register");
4311 }
4312 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4312)
;
4313 }
4314 case Match_MissingFeature:
4315 return Error(Loc,
4316 "instruction requires a CPU feature not currently enabled");
4317 case Match_InvalidOperand:
4318 return Error(Loc, "invalid operand for instruction");
4319 case Match_InvalidSuffix:
4320 return Error(Loc, "invalid type suffix for instruction");
4321 case Match_InvalidCondCode:
4322 return Error(Loc, "expected AArch64 condition code");
4323 case Match_AddSubRegExtendSmall:
4324 return Error(Loc,
4325 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4326 case Match_AddSubRegExtendLarge:
4327 return Error(Loc,
4328 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4329 case Match_AddSubSecondSource:
4330 return Error(Loc,
4331 "expected compatible register, symbol or integer in range [0, 4095]");
4332 case Match_LogicalSecondSource:
4333 return Error(Loc, "expected compatible register or logical immediate");
4334 case Match_InvalidMovImm32Shift:
4335 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4336 case Match_InvalidMovImm64Shift:
4337 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4338 case Match_AddSubRegShift32:
4339 return Error(Loc,
4340 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4341 case Match_AddSubRegShift64:
4342 return Error(Loc,
4343 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4344 case Match_InvalidFPImm:
4345 return Error(Loc,
4346 "expected compatible register or floating-point constant");
4347 case Match_InvalidMemoryIndexedSImm6:
4348 return Error(Loc, "index must be an integer in range [-32, 31].");
4349 case Match_InvalidMemoryIndexedSImm5:
4350 return Error(Loc, "index must be an integer in range [-16, 15].");
4351 case Match_InvalidMemoryIndexed1SImm4:
4352 return Error(Loc, "index must be an integer in range [-8, 7].");
4353 case Match_InvalidMemoryIndexed2SImm4:
4354 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4355 case Match_InvalidMemoryIndexed3SImm4:
4356 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4357 case Match_InvalidMemoryIndexed4SImm4:
4358 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4359 case Match_InvalidMemoryIndexed16SImm4:
4360 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4361 case Match_InvalidMemoryIndexed32SImm4:
4362 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4363 case Match_InvalidMemoryIndexed1SImm6:
4364 return Error(Loc, "index must be an integer in range [-32, 31].");
4365 case Match_InvalidMemoryIndexedSImm8:
4366 return Error(Loc, "index must be an integer in range [-128, 127].");
4367 case Match_InvalidMemoryIndexedSImm9:
4368 return Error(Loc, "index must be an integer in range [-256, 255].");
4369 case Match_InvalidMemoryIndexed16SImm9:
4370 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4371 case Match_InvalidMemoryIndexed8SImm10:
4372 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4373 case Match_InvalidMemoryIndexed4SImm7:
4374 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4375 case Match_InvalidMemoryIndexed8SImm7:
4376 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4377 case Match_InvalidMemoryIndexed16SImm7:
4378 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4379 case Match_InvalidMemoryIndexed8UImm5:
4380 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4381 case Match_InvalidMemoryIndexed4UImm5:
4382 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4383 case Match_InvalidMemoryIndexed2UImm5:
4384 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4385 case Match_InvalidMemoryIndexed8UImm6:
4386 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4387 case Match_InvalidMemoryIndexed16UImm6:
4388 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4389 case Match_InvalidMemoryIndexed4UImm6:
4390 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4391 case Match_InvalidMemoryIndexed2UImm6:
4392 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4393 case Match_InvalidMemoryIndexed1UImm6:
4394 return Error(Loc, "index must be in range [0, 63].");
4395 case Match_InvalidMemoryWExtend8:
4396 return Error(Loc,
4397 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4398 case Match_InvalidMemoryWExtend16:
4399 return Error(Loc,
4400 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4401 case Match_InvalidMemoryWExtend32:
4402 return Error(Loc,
4403 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4404 case Match_InvalidMemoryWExtend64:
4405 return Error(Loc,
4406 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4407 case Match_InvalidMemoryWExtend128:
4408 return Error(Loc,
4409 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4410 case Match_InvalidMemoryXExtend8:
4411 return Error(Loc,
4412 "expected 'lsl' or 'sxtx' with optional shift of #0");
4413 case Match_InvalidMemoryXExtend16:
4414 return Error(Loc,
4415 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4416 case Match_InvalidMemoryXExtend32:
4417 return Error(Loc,
4418 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4419 case Match_InvalidMemoryXExtend64:
4420 return Error(Loc,
4421 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4422 case Match_InvalidMemoryXExtend128:
4423 return Error(Loc,
4424 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4425 case Match_InvalidMemoryIndexed1:
4426 return Error(Loc, "index must be an integer in range [0, 4095].");
4427 case Match_InvalidMemoryIndexed2:
4428 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4429 case Match_InvalidMemoryIndexed4:
4430 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4431 case Match_InvalidMemoryIndexed8:
4432 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4433 case Match_InvalidMemoryIndexed16:
4434 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4435 case Match_InvalidImm0_1:
4436 return Error(Loc, "immediate must be an integer in range [0, 1].");
4437 case Match_InvalidImm0_7:
4438 return Error(Loc, "immediate must be an integer in range [0, 7].");
4439 case Match_InvalidImm0_15:
4440 return Error(Loc, "immediate must be an integer in range [0, 15].");
4441 case Match_InvalidImm0_31:
4442 return Error(Loc, "immediate must be an integer in range [0, 31].");
4443 case Match_InvalidImm0_63:
4444 return Error(Loc, "immediate must be an integer in range [0, 63].");
4445 case Match_InvalidImm0_127:
4446 return Error(Loc, "immediate must be an integer in range [0, 127].");
4447 case Match_InvalidImm0_255:
4448 return Error(Loc, "immediate must be an integer in range [0, 255].");
4449 case Match_InvalidImm0_65535:
4450 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4451 case Match_InvalidImm1_8:
4452 return Error(Loc, "immediate must be an integer in range [1, 8].");
4453 case Match_InvalidImm1_16:
4454 return Error(Loc, "immediate must be an integer in range [1, 16].");
4455 case Match_InvalidImm1_32:
4456 return Error(Loc, "immediate must be an integer in range [1, 32].");
4457 case Match_InvalidImm1_64:
4458 return Error(Loc, "immediate must be an integer in range [1, 64].");
4459 case Match_InvalidSVEAddSubImm8:
4460 return Error(Loc, "immediate must be an integer in range [0, 255]"
4461 " with a shift amount of 0");
4462 case Match_InvalidSVEAddSubImm16:
4463 case Match_InvalidSVEAddSubImm32:
4464 case Match_InvalidSVEAddSubImm64:
4465 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4466 "multiple of 256 in range [256, 65280]");
4467 case Match_InvalidSVECpyImm8:
4468 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4469 " with a shift amount of 0");
4470 case Match_InvalidSVECpyImm16:
4471 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4472 "multiple of 256 in range [-32768, 65280]");
4473 case Match_InvalidSVECpyImm32:
4474 case Match_InvalidSVECpyImm64:
4475 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4476 "multiple of 256 in range [-32768, 32512]");
4477 case Match_InvalidIndexRange1_1:
4478 return Error(Loc, "expected lane specifier '[1]'");
4479 case Match_InvalidIndexRange0_15:
4480 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4481 case Match_InvalidIndexRange0_7:
4482 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4483 case Match_InvalidIndexRange0_3:
4484 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4485 case Match_InvalidIndexRange0_1:
4486 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4487 case Match_InvalidSVEIndexRange0_63:
4488 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4489 case Match_InvalidSVEIndexRange0_31:
4490 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4491 case Match_InvalidSVEIndexRange0_15:
4492 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4493 case Match_InvalidSVEIndexRange0_7:
4494 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4495 case Match_InvalidSVEIndexRange0_3:
4496 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4497 case Match_InvalidLabel:
4498 return Error(Loc, "expected label or encodable integer pc offset");
4499 case Match_MRS:
4500 return Error(Loc, "expected readable system register");
4501 case Match_MSR:
4502 return Error(Loc, "expected writable system register or pstate");
4503 case Match_InvalidComplexRotationEven:
4504 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4505 case Match_InvalidComplexRotationOdd:
4506 return Error(Loc, "complex rotation must be 90 or 270.");
4507 case Match_MnemonicFail: {
4508 std::string Suggestion = AArch64MnemonicSpellCheck(
4509 ((AArch64Operand &)*Operands[0]).getToken(),
4510 ComputeAvailableFeatures(STI->getFeatureBits()));
4511 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4512 }
4513 case Match_InvalidGPR64shifted8:
4514 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4515 case Match_InvalidGPR64shifted16:
4516 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4517 case Match_InvalidGPR64shifted32:
4518 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4519 case Match_InvalidGPR64shifted64:
4520 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4521 case Match_InvalidGPR64NoXZRshifted8:
4522 return Error(Loc, "register must be x0..x30 without shift");
4523 case Match_InvalidGPR64NoXZRshifted16:
4524 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4525 case Match_InvalidGPR64NoXZRshifted32:
4526 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4527 case Match_InvalidGPR64NoXZRshifted64:
4528 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4529 case Match_InvalidZPR32UXTW8:
4530 case Match_InvalidZPR32SXTW8:
4531 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4532 case Match_InvalidZPR32UXTW16:
4533 case Match_InvalidZPR32SXTW16:
4534 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4535 case Match_InvalidZPR32UXTW32:
4536 case Match_InvalidZPR32SXTW32:
4537 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4538 case Match_InvalidZPR32UXTW64:
4539 case Match_InvalidZPR32SXTW64:
4540 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4541 case Match_InvalidZPR64UXTW8:
4542 case Match_InvalidZPR64SXTW8:
4543 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4544 case Match_InvalidZPR64UXTW16:
4545 case Match_InvalidZPR64SXTW16:
4546 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4547 case Match_InvalidZPR64UXTW32:
4548 case Match_InvalidZPR64SXTW32:
4549 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4550 case Match_InvalidZPR64UXTW64:
4551 case Match_InvalidZPR64SXTW64:
4552 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4553 case Match_InvalidZPR32LSL8:
4554 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4555 case Match_InvalidZPR32LSL16:
4556 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4557 case Match_InvalidZPR32LSL32:
4558 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4559 case Match_InvalidZPR32LSL64:
4560 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4561 case Match_InvalidZPR64LSL8:
4562 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4563 case Match_InvalidZPR64LSL16:
4564 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4565 case Match_InvalidZPR64LSL32:
4566 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4567 case Match_InvalidZPR64LSL64:
4568 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4569 case Match_InvalidZPR0:
4570 return Error(Loc, "expected register without element width suffix");
4571 case Match_InvalidZPR8:
4572 case Match_InvalidZPR16:
4573 case Match_InvalidZPR32:
4574 case Match_InvalidZPR64:
4575 case Match_InvalidZPR128:
4576 return Error(Loc, "invalid element width");
4577 case Match_InvalidZPR_3b8:
4578 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4579 case Match_InvalidZPR_3b16:
4580 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4581 case Match_InvalidZPR_3b32:
4582 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4583 case Match_InvalidZPR_4b16:
4584 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4585 case Match_InvalidZPR_4b32:
4586 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4587 case Match_InvalidZPR_4b64:
4588 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4589 case Match_InvalidSVEPattern:
4590 return Error(Loc, "invalid predicate pattern");
4591 case Match_InvalidSVEPredicateAnyReg:
4592 case Match_InvalidSVEPredicateBReg:
4593 case Match_InvalidSVEPredicateHReg:
4594 case Match_InvalidSVEPredicateSReg:
4595 case Match_InvalidSVEPredicateDReg:
4596 return Error(Loc, "invalid predicate register.");
4597 case Match_InvalidSVEPredicate3bAnyReg:
4598 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
4599 case Match_InvalidSVEPredicate3bBReg:
4600 return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
4601 case Match_InvalidSVEPredicate3bHReg:
4602 return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
4603 case Match_InvalidSVEPredicate3bSReg:
4604 return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
4605 case Match_InvalidSVEPredicate3bDReg:
4606 return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
4607 case Match_InvalidSVEExactFPImmOperandHalfOne:
4608 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4609 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4610 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4611 case Match_InvalidSVEExactFPImmOperandZeroOne:
4612 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4613 default:
4614 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4614)
;
4615 }
4616}
4617
4618static const char *getSubtargetFeatureName(uint64_t Val);
4619
4620bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4621 OperandVector &Operands,
4622 MCStreamer &Out,
4623 uint64_t &ErrorInfo,
4624 bool MatchingInlineAsm) {
4625 assert(!Operands.empty() && "Unexpect empty operand list!")((!Operands.empty() && "Unexpect empty operand list!"
) ? static_cast<void> (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4625, __PRETTY_FUNCTION__))
;
4626 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4627 assert(Op.isToken() && "Leading operand should always be a mnemonic!")((Op.isToken() && "Leading operand should always be a mnemonic!"
) ? static_cast<void> (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4627, __PRETTY_FUNCTION__))
;
4628
4629 StringRef Tok = Op.getToken();
4630 unsigned NumOperands = Operands.size();
4631
4632 if (NumOperands == 4 && Tok == "lsl") {
4633 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4634 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4635 if (Op2.isScalarReg() && Op3.isImm()) {
4636 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4637 if (Op3CE) {
4638 uint64_t Op3Val = Op3CE->getValue();
4639 uint64_t NewOp3Val = 0;
4640 uint64_t NewOp4Val = 0;
4641 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4642 Op2.getReg())) {
4643 NewOp3Val = (32 - Op3Val) & 0x1f;
4644 NewOp4Val = 31 - Op3Val;
4645 } else {
4646 NewOp3Val = (64 - Op3Val) & 0x3f;
4647 NewOp4Val = 63 - Op3Val;
4648 }
4649
4650 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4651 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4652
4653 Operands[0] = AArch64Operand::CreateToken(
4654 "ubfm", false, Op.getStartLoc(), getContext());
4655 Operands.push_back(AArch64Operand::CreateImm(
4656 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4657 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4658 Op3.getEndLoc(), getContext());
4659 }
4660 }
4661 } else if (NumOperands == 4 && Tok == "bfc") {
4662 // FIXME: Horrible hack to handle BFC->BFM alias.
4663 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4664 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4665 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4666
4667 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4668 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4669 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4670
4671 if (LSBCE && WidthCE) {
4672 uint64_t LSB = LSBCE->getValue();
4673 uint64_t Width = WidthCE->getValue();
4674
4675 uint64_t RegWidth = 0;
4676 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4677 Op1.getReg()))
4678 RegWidth = 64;
4679 else
4680 RegWidth = 32;
4681
4682 if (LSB >= RegWidth)
4683 return Error(LSBOp.getStartLoc(),
4684 "expected integer in range [0, 31]");
4685 if (Width < 1 || Width > RegWidth)
4686 return Error(WidthOp.getStartLoc(),
4687 "expected integer in range [1, 32]");
4688
4689 uint64_t ImmR = 0;
4690 if (RegWidth == 32)
4691 ImmR = (32 - LSB) & 0x1f;
4692 else
4693 ImmR = (64 - LSB) & 0x3f;
4694
4695 uint64_t ImmS = Width - 1;
4696
4697 if (ImmR != 0 && ImmS >= ImmR)
4698 return Error(WidthOp.getStartLoc(),
4699 "requested insert overflows register");
4700
4701 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4702 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4703 Operands[0] = AArch64Operand::CreateToken(
4704 "bfm", false, Op.getStartLoc(), getContext());
4705 Operands[2] = AArch64Operand::CreateReg(
4706 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4707 SMLoc(), SMLoc(), getContext());
4708 Operands[3] = AArch64Operand::CreateImm(
4709 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4710 Operands.emplace_back(
4711 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4712 WidthOp.getEndLoc(), getContext()));
4713 }
4714 }
4715 } else if (NumOperands == 5) {
4716 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4717 // UBFIZ -> UBFM aliases.
4718 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4719 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4720 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4721 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4722
4723 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4724 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4725 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4726
4727 if (Op3CE && Op4CE) {
4728 uint64_t Op3Val = Op3CE->getValue();
4729 uint64_t Op4Val = Op4CE->getValue();
4730
4731 uint64_t RegWidth = 0;
4732 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4733 Op1.getReg()))
4734 RegWidth = 64;
4735 else
4736 RegWidth = 32;
4737
4738 if (Op3Val >= RegWidth)
4739 return Error(Op3.getStartLoc(),
4740 "expected integer in range [0, 31]");
4741 if (Op4Val < 1 || Op4Val > RegWidth)
4742 return Error(Op4.getStartLoc(),
4743 "expected integer in range [1, 32]");
4744
4745 uint64_t NewOp3Val = 0;
4746 if (RegWidth == 32)
4747 NewOp3Val = (32 - Op3Val) & 0x1f;
4748 else
4749 NewOp3Val = (64 - Op3Val) & 0x3f;
4750
4751 uint64_t NewOp4Val = Op4Val - 1;
4752
4753 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4754 return Error(Op4.getStartLoc(),
4755 "requested insert overflows register");
4756
4757 const MCExpr *NewOp3 =
4758 MCConstantExpr::create(NewOp3Val, getContext());
4759 const MCExpr *NewOp4 =
4760 MCConstantExpr::create(NewOp4Val, getContext());
4761 Operands[3] = AArch64Operand::CreateImm(
4762 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4763 Operands[4] = AArch64Operand::CreateImm(
4764 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4765 if (Tok == "bfi")
4766 Operands[0] = AArch64Operand::CreateToken(
4767 "bfm", false, Op.getStartLoc(), getContext());
4768 else if (Tok == "sbfiz")
4769 Operands[0] = AArch64Operand::CreateToken(
4770 "sbfm", false, Op.getStartLoc(), getContext());
4771 else if (Tok == "ubfiz")
4772 Operands[0] = AArch64Operand::CreateToken(
4773 "ubfm", false, Op.getStartLoc(), getContext());
4774 else
4775 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4775)
;
4776 }
4777 }
4778
4779 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4780 // UBFX -> UBFM aliases.
4781 } else if (NumOperands == 5 &&
4782 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4783 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4784 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4785 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4786
4787 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4788 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4789 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4790
4791 if (Op3CE && Op4CE) {
4792 uint64_t Op3Val = Op3CE->getValue();
4793 uint64_t Op4Val = Op4CE->getValue();
4794
4795 uint64_t RegWidth = 0;
4796 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4797 Op1.getReg()))
4798 RegWidth = 64;
4799 else
4800 RegWidth = 32;
4801
4802 if (Op3Val >= RegWidth)
4803 return Error(Op3.getStartLoc(),
4804 "expected integer in range [0, 31]");
4805 if (Op4Val < 1 || Op4Val > RegWidth)
4806 return Error(Op4.getStartLoc(),
4807 "expected integer in range [1, 32]");
4808
4809 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4810
4811 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4812 return Error(Op4.getStartLoc(),
4813 "requested extract overflows register");
4814
4815 const MCExpr *NewOp4 =
4816 MCConstantExpr::create(NewOp4Val, getContext());
4817 Operands[4] = AArch64Operand::CreateImm(
4818 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4819 if (Tok == "bfxil")
4820 Operands[0] = AArch64Operand::CreateToken(
4821 "bfm", false, Op.getStartLoc(), getContext());
4822 else if (Tok == "sbfx")
4823 Operands[0] = AArch64Operand::CreateToken(
4824 "sbfm", false, Op.getStartLoc(), getContext());
4825 else if (Tok == "ubfx")
4826 Operands[0] = AArch64Operand::CreateToken(
4827 "ubfm", false, Op.getStartLoc(), getContext());
4828 else
4829 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4829)
;
4830 }
4831 }
4832 }
4833 }
4834
4835 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4836 // instruction for FP registers correctly in some rare circumstances. Convert
4837 // it to a safe instruction and warn (because silently changing someone's
4838 // assembly is rude).
4839 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4840 NumOperands == 4 && Tok == "movi") {
4841 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4842 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4843 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4844 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4845 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4846 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4847 if (Suffix.lower() == ".2d" &&
4848 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4849 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4850 " correctly on this CPU, converting to equivalent movi.16b");
4851 // Switch the suffix to .16b.
4852 unsigned Idx = Op1.isToken() ? 1 : 2;
4853 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4854 getContext());
4855 }
4856 }
4857 }
4858
4859 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4860 // InstAlias can't quite handle this since the reg classes aren't
4861 // subclasses.
4862 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4863 // The source register can be Wn here, but the matcher expects a
4864 // GPR64. Twiddle it here if necessary.
4865 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4866 if (Op.isScalarReg()) {
4867 unsigned Reg = getXRegFromWReg(Op.getReg());
4868 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4869 Op.getStartLoc(), Op.getEndLoc(),
4870 getContext());
4871 }
4872 }
4873 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4874 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4875 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4876 if (Op.isScalarReg() &&
4877 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4878 Op.getReg())) {
4879 // The source register can be Wn here, but the matcher expects a
4880 // GPR64. Twiddle it here if necessary.
4881 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4882 if (Op.isScalarReg()) {
4883 unsigned Reg = getXRegFromWReg(Op.getReg());
4884 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4885 Op.getStartLoc(),
4886 Op.getEndLoc(), getContext());
4887 }
4888 }
4889 }
4890 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4891 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4892 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4893 if (Op.isScalarReg() &&
4894 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4895 Op.getReg())) {
4896 // The source register can be Wn here, but the matcher expects a
4897 // GPR32. Twiddle it here if necessary.
4898 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4899 if (Op.isScalarReg()) {
4900 unsigned Reg = getWRegFromXReg(Op.getReg());
4901 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4902 Op.getStartLoc(),
4903 Op.getEndLoc(), getContext());
4904 }
4905 }
4906 }
4907
4908 MCInst Inst;
4909 FeatureBitset MissingFeatures;
4910 // First try to match against the secondary set of tables containing the
4911 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4912 unsigned MatchResult =
4913 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4914 MatchingInlineAsm, 1);
4915
4916 // If that fails, try against the alternate table containing long-form NEON:
4917 // "fadd v0.2s, v1.2s, v2.2s"
4918 if (MatchResult != Match_Success) {
4919 // But first, save the short-form match result: we can use it in case the
4920 // long-form match also fails.
4921 auto ShortFormNEONErrorInfo = ErrorInfo;
4922 auto ShortFormNEONMatchResult = MatchResult;
4923 auto ShortFormNEONMissingFeatures = MissingFeatures;
4924
4925 MatchResult =
4926 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4927 MatchingInlineAsm, 0);
4928
4929 // Now, both matches failed, and the long-form match failed on the mnemonic
4930 // suffix token operand. The short-form match failure is probably more
4931 // relevant: use it instead.
4932 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4933 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4934 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4935 MatchResult = ShortFormNEONMatchResult;
4936 ErrorInfo = ShortFormNEONErrorInfo;
4937 MissingFeatures = ShortFormNEONMissingFeatures;
4938 }
4939 }
4940
4941 switch (MatchResult) {
4942 case Match_Success: {
4943 // Perform range checking and other semantic validations
4944 SmallVector<SMLoc, 8> OperandLocs;
4945 NumOperands = Operands.size();
4946 for (unsigned i = 1; i < NumOperands; ++i)
4947 OperandLocs.push_back(Operands[i]->getStartLoc());
4948 if (validateInstruction(Inst, IDLoc, OperandLocs))
4949 return true;
4950
4951 Inst.setLoc(IDLoc);
4952 Out.emitInstruction(Inst, getSTI());
4953 return false;
4954 }
4955 case Match_MissingFeature: {
4956 assert(MissingFeatures.any() && "Unknown missing feature!")((MissingFeatures.any() && "Unknown missing feature!"
) ? static_cast<void> (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4956, __PRETTY_FUNCTION__))
;
4957 // Special case the error message for the very common case where only
4958 // a single subtarget feature is missing (neon, e.g.).
4959 std::string Msg = "instruction requires:";
4960 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4961 if (MissingFeatures[i]) {
4962 Msg += " ";
4963 Msg += getSubtargetFeatureName(i);
4964 }
4965 }
4966 return Error(IDLoc, Msg);
4967 }
4968 case Match_MnemonicFail:
4969 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4970 case Match_InvalidOperand: {
4971 SMLoc ErrorLoc = IDLoc;
4972
4973 if (ErrorInfo != ~0ULL) {
4974 if (ErrorInfo >= Operands.size())
4975 return Error(IDLoc, "too few operands for instruction",
4976 SMRange(IDLoc, getTok().getLoc()));
4977
4978 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4979 if (ErrorLoc == SMLoc())
4980 ErrorLoc = IDLoc;
4981 }
4982 // If the match failed on a suffix token operand, tweak the diagnostic
4983 // accordingly.
4984 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4985 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4986 MatchResult = Match_InvalidSuffix;
4987
4988 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4989 }
4990 case Match_InvalidTiedOperand:
4991 case Match_InvalidMemoryIndexed1:
4992 case Match_InvalidMemoryIndexed2:
4993 case Match_InvalidMemoryIndexed4:
4994 case Match_InvalidMemoryIndexed8:
4995 case Match_InvalidMemoryIndexed16:
4996 case Match_InvalidCondCode:
4997 case Match_AddSubRegExtendSmall:
4998 case Match_AddSubRegExtendLarge:
4999 case Match_AddSubSecondSource:
5000 case Match_LogicalSecondSource:
5001 case Match_AddSubRegShift32:
5002 case Match_AddSubRegShift64:
5003 case Match_InvalidMovImm32Shift:
5004 case Match_InvalidMovImm64Shift:
5005 case Match_InvalidFPImm:
5006 case Match_InvalidMemoryWExtend8:
5007 case Match_InvalidMemoryWExtend16:
5008 case Match_InvalidMemoryWExtend32:
5009 case Match_InvalidMemoryWExtend64:
5010 case Match_InvalidMemoryWExtend128:
5011 case Match_InvalidMemoryXExtend8:
5012 case Match_InvalidMemoryXExtend16:
5013 case Match_InvalidMemoryXExtend32:
5014 case Match_InvalidMemoryXExtend64:
5015 case Match_InvalidMemoryXExtend128:
5016 case Match_InvalidMemoryIndexed1SImm4:
5017 case Match_InvalidMemoryIndexed2SImm4:
5018 case Match_InvalidMemoryIndexed3SImm4:
5019 case Match_InvalidMemoryIndexed4SImm4:
5020 case Match_InvalidMemoryIndexed1SImm6:
5021 case Match_InvalidMemoryIndexed16SImm4:
5022 case Match_InvalidMemoryIndexed32SImm4:
5023 case Match_InvalidMemoryIndexed4SImm7:
5024 case Match_InvalidMemoryIndexed8SImm7:
5025 case Match_InvalidMemoryIndexed16SImm7:
5026 case Match_InvalidMemoryIndexed8UImm5:
5027 case Match_InvalidMemoryIndexed4UImm5:
5028 case Match_InvalidMemoryIndexed2UImm5:
5029 case Match_InvalidMemoryIndexed1UImm6:
5030 case Match_InvalidMemoryIndexed2UImm6:
5031 case Match_InvalidMemoryIndexed4UImm6:
5032 case Match_InvalidMemoryIndexed8UImm6:
5033 case Match_InvalidMemoryIndexed16UImm6:
5034 case Match_InvalidMemoryIndexedSImm6:
5035 case Match_InvalidMemoryIndexedSImm5:
5036 case Match_InvalidMemoryIndexedSImm8:
5037 case Match_InvalidMemoryIndexedSImm9:
5038 case Match_InvalidMemoryIndexed16SImm9:
5039 case Match_InvalidMemoryIndexed8SImm10:
5040 case Match_InvalidImm0_1:
5041 case Match_InvalidImm0_7:
5042 case Match_InvalidImm0_15:
5043 case Match_InvalidImm0_31:
5044 case Match_InvalidImm0_63:
5045 case Match_InvalidImm0_127:
5046 case Match_InvalidImm0_255:
5047 case Match_InvalidImm0_65535:
5048 case Match_InvalidImm1_8:
5049 case Match_InvalidImm1_16:
5050 case Match_InvalidImm1_32:
5051 case Match_InvalidImm1_64:
5052 case Match_InvalidSVEAddSubImm8:
5053 case Match_InvalidSVEAddSubImm16:
5054 case Match_InvalidSVEAddSubImm32:
5055 case Match_InvalidSVEAddSubImm64:
5056 case Match_InvalidSVECpyImm8:
5057 case Match_InvalidSVECpyImm16:
5058 case Match_InvalidSVECpyImm32:
5059 case Match_InvalidSVECpyImm64:
5060 case Match_InvalidIndexRange1_1:
5061 case Match_InvalidIndexRange0_15:
5062 case Match_InvalidIndexRange0_7:
5063 case Match_InvalidIndexRange0_3:
5064 case Match_InvalidIndexRange0_1:
5065 case Match_InvalidSVEIndexRange0_63:
5066 case Match_InvalidSVEIndexRange0_31:
5067 case Match_InvalidSVEIndexRange0_15:
5068 case Match_InvalidSVEIndexRange0_7:
5069 case Match_InvalidSVEIndexRange0_3:
5070 case Match_InvalidLabel:
5071 case Match_InvalidComplexRotationEven:
5072 case Match_InvalidComplexRotationOdd:
5073 case Match_InvalidGPR64shifted8:
5074 case Match_InvalidGPR64shifted16:
5075 case Match_InvalidGPR64shifted32:
5076 case Match_InvalidGPR64shifted64:
5077 case Match_InvalidGPR64NoXZRshifted8:
5078 case Match_InvalidGPR64NoXZRshifted16:
5079 case Match_InvalidGPR64NoXZRshifted32:
5080 case Match_InvalidGPR64NoXZRshifted64:
5081 case Match_InvalidZPR32UXTW8:
5082 case Match_InvalidZPR32UXTW16:
5083 case Match_InvalidZPR32UXTW32:
5084 case Match_InvalidZPR32UXTW64:
5085 case Match_InvalidZPR32SXTW8:
5086 case Match_InvalidZPR32SXTW16:
5087 case Match_InvalidZPR32SXTW32:
5088 case Match_InvalidZPR32SXTW64:
5089 case Match_InvalidZPR64UXTW8:
5090 case Match_InvalidZPR64SXTW8:
5091 case Match_InvalidZPR64UXTW16:
5092 case Match_InvalidZPR64SXTW16:
5093 case Match_InvalidZPR64UXTW32:
5094 case Match_InvalidZPR64SXTW32:
5095 case Match_InvalidZPR64UXTW64:
5096 case Match_InvalidZPR64SXTW64:
5097 case Match_InvalidZPR32LSL8:
5098 case Match_InvalidZPR32LSL16:
5099 case Match_InvalidZPR32LSL32:
5100 case Match_InvalidZPR32LSL64:
5101 case Match_InvalidZPR64LSL8:
5102 case Match_InvalidZPR64LSL16:
5103 case Match_InvalidZPR64LSL32:
5104 case Match_InvalidZPR64LSL64:
5105 case Match_InvalidZPR0:
5106 case Match_InvalidZPR8:
5107 case Match_InvalidZPR16:
5108 case Match_InvalidZPR32:
5109 case Match_InvalidZPR64:
5110 case Match_InvalidZPR128:
5111 case Match_InvalidZPR_3b8:
5112 case Match_InvalidZPR_3b16:
5113 case Match_InvalidZPR_3b32:
5114 case Match_InvalidZPR_4b16:
5115 case Match_InvalidZPR_4b32:
5116 case Match_InvalidZPR_4b64:
5117 case Match_InvalidSVEPredicateAnyReg:
5118 case Match_InvalidSVEPattern:
5119 case Match_InvalidSVEPredicateBReg:
5120 case Match_InvalidSVEPredicateHReg:
5121 case Match_InvalidSVEPredicateSReg:
5122 case Match_InvalidSVEPredicateDReg:
5123 case Match_InvalidSVEPredicate3bAnyReg:
5124 case Match_InvalidSVEPredicate3bBReg:
5125 case Match_InvalidSVEPredicate3bHReg:
5126 case Match_InvalidSVEPredicate3bSReg:
5127 case Match_InvalidSVEPredicate3bDReg:
5128 case Match_InvalidSVEExactFPImmOperandHalfOne:
5129 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5130 case Match_InvalidSVEExactFPImmOperandZeroOne:
5131 case Match_MSR:
5132 case Match_MRS: {
5133 if (ErrorInfo >= Operands.size())
5134 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5135 // Any time we get here, there's nothing fancy to do. Just get the
5136 // operand SMLoc and display the diagnostic.
5137 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5138 if (ErrorLoc == SMLoc())
5139 ErrorLoc = IDLoc;
5140 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5141 }
5142 }
5143
5144 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5144)
;
5145}
5146
5147/// ParseDirective parses the arm specific directives
5148bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5149 const MCObjectFileInfo::Environment Format =
5150 getContext().getObjectFileInfo()->getObjectFileType();
5151 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
1
Assuming 'Format' is not equal to IsMachO
5152 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
2
Assuming 'Format' is equal to IsCOFF
5153
5154 auto IDVal = DirectiveID.getIdentifier().lower();
5155 SMLoc Loc = DirectiveID.getLoc();
5156 if (IDVal == ".arch")
3
Taking false branch
5157 parseDirectiveArch(Loc);
5158 else if (IDVal == ".cpu")
4
Taking false branch
5159 parseDirectiveCPU(Loc);
5160 else if (IDVal == ".tlsdesccall")
5
Taking false branch
5161 parseDirectiveTLSDescCall(Loc);
5162 else if (IDVal == ".ltorg" || IDVal == ".pool")
6
Taking false branch
5163 parseDirectiveLtorg(Loc);
5164 else if (IDVal == ".unreq")
7
Taking false branch
5165 parseDirectiveUnreq(Loc);
5166 else if (IDVal == ".inst")
8
Taking false branch
5167 parseDirectiveInst(Loc);
5168 else if (IDVal == ".cfi_negate_ra_state")
9
Taking false branch
5169 parseDirectiveCFINegateRAState();
5170 else if (IDVal == ".cfi_b_key_frame")
10
Taking false branch
5171 parseDirectiveCFIBKeyFrame();
5172 else if (IDVal == ".arch_extension")
11
Taking false branch
5173 parseDirectiveArchExtension(Loc);
5174 else if (IsMachO
11.1
'IsMachO' is false
) {
12
Taking false branch
5175 if (IDVal == MCLOHDirectiveName())
5176 parseDirectiveLOH(IDVal, Loc);
5177 else
5178 return true;
5179 } else if (IsCOFF
12.1
'IsCOFF' is true
) {
13
Taking true branch
5180 if (IDVal == ".seh_stackalloc")
14
Taking false branch
5181 parseDirectiveSEHAllocStack(Loc);
5182 else if (IDVal == ".seh_endprologue")
15
Taking false branch
5183 parseDirectiveSEHPrologEnd(Loc);
5184 else if (IDVal == ".seh_save_r19r20_x")
16
Taking false branch
5185 parseDirectiveSEHSaveR19R20X(Loc);
5186 else if (IDVal == ".seh_save_fplr")
17
Taking false branch
5187 parseDirectiveSEHSaveFPLR(Loc);
5188 else if (IDVal == ".seh_save_fplr_x")
18
Taking false branch
5189 parseDirectiveSEHSaveFPLRX(Loc);
5190 else if (IDVal == ".seh_save_reg")
19
Taking false branch
5191 parseDirectiveSEHSaveReg(Loc);
5192 else if (IDVal == ".seh_save_reg_x")
20
Taking false branch
5193 parseDirectiveSEHSaveRegX(Loc);
5194 else if (IDVal == ".seh_save_regp")
21
Taking true branch
5195 parseDirectiveSEHSaveRegP(Loc);
22
Calling 'AArch64AsmParser::parseDirectiveSEHSaveRegP'
5196 else if (IDVal == ".seh_save_regp_x")
5197 parseDirectiveSEHSaveRegPX(Loc);
5198 else if (IDVal == ".seh_save_lrpair")
5199 parseDirectiveSEHSaveLRPair(Loc);
5200 else if (IDVal == ".seh_save_freg")
5201 parseDirectiveSEHSaveFReg(Loc);
5202 else if (IDVal == ".seh_save_freg_x")
5203 parseDirectiveSEHSaveFRegX(Loc);
5204 else if (IDVal == ".seh_save_fregp")
5205 parseDirectiveSEHSaveFRegP(Loc);
5206 else if (IDVal == ".seh_save_fregp_x")
5207 parseDirectiveSEHSaveFRegPX(Loc);
5208 else if (IDVal == ".seh_set_fp")
5209 parseDirectiveSEHSetFP(Loc);
5210 else if (IDVal == ".seh_add_fp")
5211 parseDirectiveSEHAddFP(Loc);
5212 else if (IDVal == ".seh_nop")
5213 parseDirectiveSEHNop(Loc);
5214 else if (IDVal == ".seh_save_next")
5215 parseDirectiveSEHSaveNext(Loc);
5216 else if (IDVal == ".seh_startepilogue")
5217 parseDirectiveSEHEpilogStart(Loc);
5218 else if (IDVal == ".seh_endepilogue")
5219 parseDirectiveSEHEpilogEnd(Loc);
5220 else if (IDVal == ".seh_trap_frame")
5221 parseDirectiveSEHTrapFrame(Loc);
5222 else if (IDVal == ".seh_pushframe")
5223 parseDirectiveSEHMachineFrame(Loc);
5224 else if (IDVal == ".seh_context")
5225 parseDirectiveSEHContext(Loc);
5226 else if (IDVal == ".seh_clear_unwound_to_call")
5227 parseDirectiveSEHClearUnwoundToCall(Loc);
5228 else
5229 return true;
5230 } else
5231 return true;
5232 return false;
5233}
5234
5235static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5236 SmallVector<StringRef, 4> &RequestedExtensions) {
5237 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
5238 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
5239
5240 if (!NoCrypto && Crypto) {
5241 switch (ArchKind) {
5242 default:
5243 // Map 'generic' (and others) to sha2 and aes, because
5244 // that was the traditional meaning of crypto.
5245 case AArch64::ArchKind::ARMV8_1A:
5246 case AArch64::ArchKind::ARMV8_2A:
5247 case AArch64::ArchKind::ARMV8_3A:
5248 RequestedExtensions.push_back("sha2");
5249 RequestedExtensions.push_back("aes");
5250 break;
5251 case AArch64::ArchKind::ARMV8_4A:
5252 case AArch64::ArchKind::ARMV8_5A:
5253 case AArch64::ArchKind::ARMV8_6A:
5254 RequestedExtensions.push_back("sm4");
5255 RequestedExtensions.push_back("sha3");
5256 RequestedExtensions.push_back("sha2");
5257 RequestedExtensions.push_back("aes");
5258 break;
5259 }
5260 } else if (NoCrypto) {
5261 switch (ArchKind) {
5262 default:
5263 // Map 'generic' (and others) to sha2 and aes, because
5264 // that was the traditional meaning of crypto.
5265 case AArch64::ArchKind::ARMV8_1A:
5266 case AArch64::ArchKind::ARMV8_2A:
5267 case AArch64::ArchKind::ARMV8_3A:
5268 RequestedExtensions.push_back("nosha2");
5269 RequestedExtensions.push_back("noaes");
5270 break;
5271 case AArch64::ArchKind::ARMV8_4A:
5272 case AArch64::ArchKind::ARMV8_5A:
5273 case AArch64::ArchKind::ARMV8_6A:
5274 RequestedExtensions.push_back("nosm4");
5275 RequestedExtensions.push_back("nosha3");
5276 RequestedExtensions.push_back("nosha2");
5277 RequestedExtensions.push_back("noaes");
5278 break;
5279 }
5280 }
5281}
5282
5283/// parseDirectiveArch
5284/// ::= .arch token
5285bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5286 SMLoc ArchLoc = getLoc();
5287
5288 StringRef Arch, ExtensionString;
5289 std::tie(Arch, ExtensionString) =
5290 getParser().parseStringToEndOfStatement().trim().split('+');
5291
5292 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5293 if (ID == AArch64::ArchKind::INVALID)
5294 return Error(ArchLoc, "unknown arch name");
5295
5296 if (parseToken(AsmToken::EndOfStatement))
5297 return true;
5298
5299 // Get the architecture and extension features.
5300 std::vector<StringRef> AArch64Features;
5301 AArch64::getArchFeatures(ID, AArch64Features);
5302 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5303 AArch64Features);
5304
5305 MCSubtargetInfo &STI = copySTI();
5306 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5307 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
5308 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5309
5310 SmallVector<StringRef, 4> RequestedExtensions;
5311 if (!ExtensionString.empty())
5312 ExtensionString.split(RequestedExtensions, '+');
5313
5314 ExpandCryptoAEK(ID, RequestedExtensions);
5315
5316 FeatureBitset Features = STI.getFeatureBits();
5317 for (auto Name : RequestedExtensions) {
5318 bool EnableFeature = true;
5319
5320 if (Name.startswith_lower("no")) {
5321 EnableFeature = false;
5322 Name = Name.substr(2);
5323 }
5324
5325 for (const auto &Extension : ExtensionMap) {
5326 if (Extension.Name != Name)
5327 continue;
5328
5329 if (Extension.Features.none())
5330 report_fatal_error("unsupported architectural extension: " + Name);
5331
5332 FeatureBitset ToggleFeatures = EnableFeature
5333 ? (~Features & Extension.Features)
5334 : ( Features & Extension.Features);
5335 FeatureBitset Features =
5336 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5337 setAvailableFeatures(Features);
5338 break;
5339 }
5340 }
5341 return false;
5342}
5343
5344/// parseDirectiveArchExtension
5345/// ::= .arch_extension [no]feature
5346bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5347 SMLoc ExtLoc = getLoc();
5348
5349 StringRef Name = getParser().parseStringToEndOfStatement().trim();
5350
5351 if (parseToken(AsmToken::EndOfStatement,
5352 "unexpected token in '.arch_extension' directive"))
5353 return true;
5354
5355 bool EnableFeature = true;
5356 if (Name.startswith_lower("no")) {
5357 EnableFeature = false;
5358 Name = Name.substr(2);
5359 }
5360
5361 MCSubtargetInfo &STI = copySTI();
5362 FeatureBitset Features = STI.getFeatureBits();
5363 for (const auto &Extension : ExtensionMap) {
5364 if (Extension.Name != Name)
5365 continue;
5366
5367 if (Extension.Features.none())
5368 return Error(ExtLoc, "unsupported architectural extension: " + Name);
5369
5370 FeatureBitset ToggleFeatures = EnableFeature
5371 ? (~Features & Extension.Features)
5372 : (Features & Extension.Features);
5373 FeatureBitset Features =
5374 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5375 setAvailableFeatures(Features);
5376 return false;
5377 }
5378
5379 return Error(ExtLoc, "unknown architectural extension: " + Name);
5380}
5381
5382static SMLoc incrementLoc(SMLoc L, int Offset) {
5383 return SMLoc::getFromPointer(L.getPointer() + Offset);
5384}
5385
5386/// parseDirectiveCPU
5387/// ::= .cpu id
5388bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5389 SMLoc CurLoc = getLoc();
5390
5391 StringRef CPU, ExtensionString;
5392 std::tie(CPU, ExtensionString) =
5393 getParser().parseStringToEndOfStatement().trim().split('+');
5394
5395 if (parseToken(AsmToken::EndOfStatement))
5396 return true;
5397
5398 SmallVector<StringRef, 4> RequestedExtensions;
5399 if (!ExtensionString.empty())
5400 ExtensionString.split(RequestedExtensions, '+');
5401
5402 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5403 // once that is tablegen'ed
5404 if (!getSTI().isCPUStringValid(CPU)) {
5405 Error(CurLoc, "unknown CPU name");
5406 return false;
5407 }
5408
5409 MCSubtargetInfo &STI = copySTI();
5410 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
5411 CurLoc = incrementLoc(CurLoc, CPU.size());
5412
5413 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5414
5415 FeatureBitset Features = STI.getFeatureBits();
5416 for (auto Name : RequestedExtensions) {
5417 // Advance source location past '+'.
5418 CurLoc = incrementLoc(CurLoc, 1);
5419
5420 bool EnableFeature = true;
5421
5422 if (Name.startswith_lower("no")) {
5423 EnableFeature = false;
5424 Name = Name.substr(2);
5425 }
5426
5427 bool FoundExtension = false;
5428 for (const auto &Extension : ExtensionMap) {
5429 if (Extension.Name != Name)
5430 continue;
5431
5432 if (Extension.Features.none())
5433 report_fatal_error("unsupported architectural extension: " + Name);
5434
5435 FeatureBitset ToggleFeatures = EnableFeature
5436 ? (~Features & Extension.Features)
5437 : ( Features & Extension.Features);
5438 FeatureBitset Features =
5439 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5440 setAvailableFeatures(Features);
5441 FoundExtension = true;
5442
5443 break;
5444 }
5445
5446 if (!FoundExtension)
5447 Error(CurLoc, "unsupported architectural extension");
5448
5449 CurLoc = incrementLoc(CurLoc, Name.size());
5450 }
5451 return false;
5452}
5453
5454/// parseDirectiveInst
5455/// ::= .inst opcode [, ...]
5456bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5457 if (getLexer().is(AsmToken::EndOfStatement))
5458 return Error(Loc, "expected expression following '.inst' directive");
5459
5460 auto parseOp = [&]() -> bool {
5461 SMLoc L = getLoc();
5462 const MCExpr *Expr = nullptr;
5463 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5464 return true;
5465 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5466 if (check(!Value, L, "expected constant expression"))
5467 return true;
5468 getTargetStreamer().emitInst(Value->getValue());
5469 return false;
5470 };
5471
5472 if (parseMany(parseOp))
5473 return addErrorSuffix(" in '.inst' directive");
5474 return false;
5475}
5476
5477// parseDirectiveTLSDescCall:
5478// ::= .tlsdesccall symbol
5479bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5480 StringRef Name;
5481 if (check(getParser().parseIdentifier(Name), L,
5482 "expected symbol after directive") ||
5483 parseToken(AsmToken::EndOfStatement))
5484 return true;
5485
5486 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5487 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5488 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5489
5490 MCInst Inst;
5491 Inst.setOpcode(AArch64::TLSDESCCALL);
5492 Inst.addOperand(MCOperand::createExpr(Expr));
5493
5494 getParser().getStreamer().emitInstruction(Inst, getSTI());
5495 return false;
5496}
5497
5498/// ::= .loh <lohName | lohId> label1, ..., labelN
5499/// The number of arguments depends on the loh identifier.
5500bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5501 MCLOHType Kind;
5502 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5503 if (getParser().getTok().isNot(AsmToken::Integer))
5504 return TokError("expected an identifier or a number in directive");
5505 // We successfully get a numeric value for the identifier.
5506 // Check if it is valid.
5507 int64_t Id = getParser().getTok().getIntVal();
5508 if (Id <= -1U && !isValidMCLOHType(Id))
5509 return TokError("invalid numeric identifier in directive");
5510 Kind = (MCLOHType)Id;
5511 } else {
5512 StringRef Name = getTok().getIdentifier();
5513 // We successfully parse an identifier.
5514 // Check if it is a recognized one.
5515 int Id = MCLOHNameToId(Name);
5516
5517 if (Id == -1)
5518 return TokError("invalid identifier in directive");
5519 Kind = (MCLOHType)Id;
5520 }
5521 // Consume the identifier.
5522 Lex();
5523 // Get the number of arguments of this LOH.
5524 int NbArgs = MCLOHIdToNbArgs(Kind);
5525
5526 assert(NbArgs != -1 && "Invalid number of arguments")((NbArgs != -1 && "Invalid number of arguments") ? static_cast
<void> (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5526, __PRETTY_FUNCTION__))
;
5527
5528 SmallVector<MCSymbol *, 3> Args;
5529 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5530 StringRef Name;
5531 if (getParser().parseIdentifier(Name))
5532 return TokError("expected identifier in directive");
5533 Args.push_back(getContext().getOrCreateSymbol(Name));
5534
5535 if (Idx + 1 == NbArgs)
5536 break;
5537 if (parseToken(AsmToken::Comma,
5538 "unexpected token in '" + Twine(IDVal) + "' directive"))
5539 return true;
5540 }
5541 if (parseToken(AsmToken::EndOfStatement,
5542 "unexpected token in '" + Twine(IDVal) + "' directive"))
5543 return true;
5544
5545 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
5546 return false;
5547}
5548
5549/// parseDirectiveLtorg
5550/// ::= .ltorg | .pool
5551bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5552 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5553 return true;
5554 getTargetStreamer().emitCurrentConstantPool();
5555 return false;
5556}
5557
5558/// parseDirectiveReq
5559/// ::= name .req registername
5560bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5561 MCAsmParser &Parser = getParser();
5562 Parser.Lex(); // Eat the '.req' token.
5563 SMLoc SRegLoc = getLoc();
5564 RegKind RegisterKind = RegKind::Scalar;
5565 unsigned RegNum;
5566 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5567
5568 if (ParseRes != MatchOperand_Success) {
5569 StringRef Kind;
5570 RegisterKind = RegKind::NeonVector;
5571 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5572
5573 if (ParseRes == MatchOperand_ParseFail)
5574 return true;
5575
5576 if (ParseRes == MatchOperand_Success && !Kind.empty())
5577 return Error(SRegLoc, "vector register without type specifier expected");
5578 }
5579
5580 if (ParseRes != MatchOperand_Success) {
5581 StringRef Kind;
5582 RegisterKind = RegKind::SVEDataVector;
5583 ParseRes =
5584 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5585
5586 if (ParseRes == MatchOperand_ParseFail)
5587 return true;
5588
5589 if (ParseRes == MatchOperand_Success && !Kind.empty())
5590 return Error(SRegLoc,
5591 "sve vector register without type specifier expected");
5592 }
5593
5594 if (ParseRes != MatchOperand_Success) {
5595 StringRef Kind;
5596 RegisterKind = RegKind::SVEPredicateVector;
5597 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5598
5599 if (ParseRes == MatchOperand_ParseFail)
5600 return true;
5601
5602 if (ParseRes == MatchOperand_Success && !Kind.empty())
5603 return Error(SRegLoc,
5604 "sve predicate register without type specifier expected");
5605 }
5606
5607 if (ParseRes != MatchOperand_Success)
5608 return Error(SRegLoc, "register name or alias expected");
5609
5610 // Shouldn't be anything else.
5611 if (parseToken(AsmToken::EndOfStatement,
5612 "unexpected input in .req directive"))
5613 return true;
5614
5615 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5616 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5617 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5618
5619 return false;
5620}
5621
5622/// parseDirectiveUneq
5623/// ::= .unreq registername
5624bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5625 MCAsmParser &Parser = getParser();
5626 if (getTok().isNot(AsmToken::Identifier))
5627 return TokError("unexpected input in .unreq directive.");
5628 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5629 Parser.Lex(); // Eat the identifier.
5630 if (parseToken(AsmToken::EndOfStatement))
5631 return addErrorSuffix("in '.unreq' directive");
5632 return false;
5633}
5634
5635bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5636 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5637 return true;
5638 getStreamer().emitCFINegateRAState();
5639 return false;
5640}
5641
5642/// parseDirectiveCFIBKeyFrame
5643/// ::= .cfi_b_key
5644bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5645 if (parseToken(AsmToken::EndOfStatement,
5646 "unexpected token in '.cfi_b_key_frame'"))
5647 return true;
5648 getStreamer().emitCFIBKeyFrame();
5649 return false;
5650}
5651
5652/// parseDirectiveSEHAllocStack
5653/// ::= .seh_stackalloc
5654bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
5655 int64_t Size;
5656 if (parseImmExpr(Size))
5657 return true;
5658 getTargetStreamer().EmitARM64WinCFIAllocStack(Size);
5659 return false;
5660}
5661
5662/// parseDirectiveSEHPrologEnd
5663/// ::= .seh_endprologue
5664bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
5665 getTargetStreamer().EmitARM64WinCFIPrologEnd();
5666 return false;
5667}
5668
5669/// parseDirectiveSEHSaveR19R20X
5670/// ::= .seh_save_r19r20_x
5671bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
5672 int64_t Offset;
5673 if (parseImmExpr(Offset))
5674 return true;
5675 getTargetStreamer().EmitARM64WinCFISaveR19R20X(Offset);
5676 return false;
5677}
5678
5679/// parseDirectiveSEHSaveFPLR
5680/// ::= .seh_save_fplr
5681bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
5682 int64_t Offset;
5683 if (parseImmExpr(Offset))
5684 return true;
5685 getTargetStreamer().EmitARM64WinCFISaveFPLR(Offset);
5686 return false;
5687}
5688
5689/// parseDirectiveSEHSaveFPLRX
5690/// ::= .seh_save_fplr_x
5691bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
5692 int64_t Offset;
5693 if (parseImmExpr(Offset))
5694 return true;
5695 getTargetStreamer().EmitARM64WinCFISaveFPLRX(Offset);
5696 return false;
5697}
5698
5699/// parseDirectiveSEHSaveReg
5700/// ::= .seh_save_reg
5701bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
5702 unsigned Reg;
5703 int64_t Offset;
5704 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5705 parseComma() || parseImmExpr(Offset))
5706 return true;
5707 getTargetStreamer().EmitARM64WinCFISaveReg(Reg, Offset);
5708 return false;
5709}
5710
5711/// parseDirectiveSEHSaveRegX
5712/// ::= .seh_save_reg_x
5713bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
5714 unsigned Reg;
5715 int64_t Offset;
5716 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5717 parseComma() || parseImmExpr(Offset))
5718 return true;
5719 getTargetStreamer().EmitARM64WinCFISaveRegX(Reg, Offset);
5720 return false;
5721}
5722
5723/// parseDirectiveSEHSaveRegP
5724/// ::= .seh_save_regp
5725bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
5726 unsigned Reg;
5727 int64_t Offset;
5728 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
23
Calling 'AArch64AsmParser::parseRegisterInRange'
5729 parseComma() || parseImmExpr(Offset))
5730 return true;
5731 getTargetStreamer().EmitARM64WinCFISaveRegP(Reg, Offset);
5732 return false;
5733}
5734
5735/// parseDirectiveSEHSaveRegPX
5736/// ::= .seh_save_regp_x
5737bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
5738 unsigned Reg;
5739 int64_t Offset;
5740 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
5741 parseComma() || parseImmExpr(Offset))
5742 return true;
5743 getTargetStreamer().EmitARM64WinCFISaveRegPX(Reg, Offset);
5744 return false;
5745}
5746
5747/// parseDirectiveSEHSaveLRPair
5748/// ::= .seh_save_lrpair
5749bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
5750 unsigned Reg;
5751 int64_t Offset;
5752 L = getLoc();
5753 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5754 parseComma() || parseImmExpr(Offset))
5755 return true;
5756 if (check(((Reg - 19) % 2 != 0), L,
5757 "expected register with even offset from x19"))
5758 return true;
5759 getTargetStreamer().EmitARM64WinCFISaveLRPair(Reg, Offset);
5760 return false;
5761}
5762
5763/// parseDirectiveSEHSaveFReg
5764/// ::= .seh_save_freg
5765bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
5766 unsigned Reg;
5767 int64_t Offset;
5768 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
5769 parseComma() || parseImmExpr(Offset))
5770 return true;
5771 getTargetStreamer().EmitARM64WinCFISaveFReg(Reg, Offset);
5772 return