Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 4430, column 15
The left operand of '==' is a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/AArch64/AsmParser -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/AArch64/AsmParser/.. 
-I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/AArch64/AsmParser/.. -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64InstPrinter.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "TargetInfo/AArch64TargetInfo.h"
15#include "AArch64InstrInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCLinkerOptimizationHint.h"
32#include "llvm/MC/MCObjectFileInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37#include "llvm/MC/MCParser/MCTargetAsmParser.h"
38#include "llvm/MC/MCRegisterInfo.h"
39#include "llvm/MC/MCStreamer.h"
40#include "llvm/MC/MCSubtargetInfo.h"
41#include "llvm/MC/MCSymbol.h"
42#include "llvm/MC/MCTargetOptions.h"
43#include "llvm/MC/SubtargetFeature.h"
44#include "llvm/MC/MCValue.h"
45#include "llvm/Support/Casting.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/ErrorHandling.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Support/SMLoc.h"
50#include "llvm/Support/TargetParser.h"
51#include "llvm/Support/TargetRegistry.h"
52#include "llvm/Support/raw_ostream.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <string>
58#include <tuple>
59#include <utility>
60#include <vector>
61
62using namespace llvm;
63
64namespace {
65
66enum class RegKind {
67 Scalar,
68 NeonVector,
69 SVEDataVector,
70 SVEPredicateVector,
71 Matrix
72};
73
74enum class MatrixKind { Array, Tile, Row, Col };
75
76enum RegConstraintEqualityTy {
77 EqualsReg,
78 EqualsSuperReg,
79 EqualsSubReg
80};
81
82class AArch64AsmParser : public MCTargetAsmParser {
83private:
84 StringRef Mnemonic; ///< Instruction mnemonic.
85
86 // Map of register aliases registers via the .req directive.
87 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
88
89 class PrefixInfo {
90 public:
91 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
92 PrefixInfo Prefix;
93 switch (Inst.getOpcode()) {
94 case AArch64::MOVPRFX_ZZ:
95 Prefix.Active = true;
96 Prefix.Dst = Inst.getOperand(0).getReg();
97 break;
98 case AArch64::MOVPRFX_ZPmZ_B:
99 case AArch64::MOVPRFX_ZPmZ_H:
100 case AArch64::MOVPRFX_ZPmZ_S:
101 case AArch64::MOVPRFX_ZPmZ_D:
102 Prefix.Active = true;
103 Prefix.Predicated = true;
104 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
105 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast<void> (0))
106 "No destructive element size set for movprfx")(static_cast<void> (0));
107 Prefix.Dst = Inst.getOperand(0).getReg();
108 Prefix.Pg = Inst.getOperand(2).getReg();
109 break;
110 case AArch64::MOVPRFX_ZPzZ_B:
111 case AArch64::MOVPRFX_ZPzZ_H:
112 case AArch64::MOVPRFX_ZPzZ_S:
113 case AArch64::MOVPRFX_ZPzZ_D:
114 Prefix.Active = true;
115 Prefix.Predicated = true;
116 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
117 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast<void> (0))
118 "No destructive element size set for movprfx")(static_cast<void> (0));
119 Prefix.Dst = Inst.getOperand(0).getReg();
120 Prefix.Pg = Inst.getOperand(1).getReg();
121 break;
122 default:
123 break;
124 }
125
126 return Prefix;
127 }
128
129 PrefixInfo() : Active(false), Predicated(false) {}
130 bool isActive() const { return Active; }
131 bool isPredicated() const { return Predicated; }
132 unsigned getElementSize() const {
133 assert(Predicated)(static_cast<void> (0));
134 return ElementSize;
135 }
136 unsigned getDstReg() const { return Dst; }
137 unsigned getPgReg() const {
138 assert(Predicated)(static_cast<void> (0));
139 return Pg;
140 }
141
142 private:
143 bool Active;
144 bool Predicated;
145 unsigned ElementSize;
146 unsigned Dst;
147 unsigned Pg;
148 } NextPrefix;
149
150 AArch64TargetStreamer &getTargetStreamer() {
151 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
152 return static_cast<AArch64TargetStreamer &>(TS);
153 }
154
155 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
156
157 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
158 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
159 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
160 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
161 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
162 bool parseRegister(OperandVector &Operands);
163 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
164 bool parseNeonVectorList(OperandVector &Operands);
165 bool parseOptionalMulOperand(OperandVector &Operands);
166 bool parseKeywordOperand(OperandVector &Operands);
167 bool parseOperand(OperandVector &Operands, bool isCondCode,
168 bool invertCondCode);
169 bool parseImmExpr(int64_t &Out);
170 bool parseComma();
171 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
172 unsigned Last);
173
174 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
175 OperandVector &Operands);
176
177 bool parseDirectiveArch(SMLoc L);
178 bool parseDirectiveArchExtension(SMLoc L);
179 bool parseDirectiveCPU(SMLoc L);
180 bool parseDirectiveInst(SMLoc L);
181
182 bool parseDirectiveTLSDescCall(SMLoc L);
183
184 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
185 bool parseDirectiveLtorg(SMLoc L);
186
187 bool parseDirectiveReq(StringRef Name, SMLoc L);
188 bool parseDirectiveUnreq(SMLoc L);
189 bool parseDirectiveCFINegateRAState();
190 bool parseDirectiveCFIBKeyFrame();
191
192 bool parseDirectiveVariantPCS(SMLoc L);
193
194 bool parseDirectiveSEHAllocStack(SMLoc L);
195 bool parseDirectiveSEHPrologEnd(SMLoc L);
196 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
197 bool parseDirectiveSEHSaveFPLR(SMLoc L);
198 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
199 bool parseDirectiveSEHSaveReg(SMLoc L);
200 bool parseDirectiveSEHSaveRegX(SMLoc L);
201 bool parseDirectiveSEHSaveRegP(SMLoc L);
202 bool parseDirectiveSEHSaveRegPX(SMLoc L);
203 bool parseDirectiveSEHSaveLRPair(SMLoc L);
204 bool parseDirectiveSEHSaveFReg(SMLoc L);
205 bool parseDirectiveSEHSaveFRegX(SMLoc L);
206 bool parseDirectiveSEHSaveFRegP(SMLoc L);
207 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
208 bool parseDirectiveSEHSetFP(SMLoc L);
209 bool parseDirectiveSEHAddFP(SMLoc L);
210 bool parseDirectiveSEHNop(SMLoc L);
211 bool parseDirectiveSEHSaveNext(SMLoc L);
212 bool parseDirectiveSEHEpilogStart(SMLoc L);
213 bool parseDirectiveSEHEpilogEnd(SMLoc L);
214 bool parseDirectiveSEHTrapFrame(SMLoc L);
215 bool parseDirectiveSEHMachineFrame(SMLoc L);
216 bool parseDirectiveSEHContext(SMLoc L);
217 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
218
219 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
220 SmallVectorImpl<SMLoc> &Loc);
221 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
222 OperandVector &Operands, MCStreamer &Out,
223 uint64_t &ErrorInfo,
224 bool MatchingInlineAsm) override;
225/// @name Auto-generated Match Functions
226/// {
227
228#define GET_ASSEMBLER_HEADER
229#include "AArch64GenAsmMatcher.inc"
230
231 /// }
232
233 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
234 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
235 RegKind MatchKind);
236 OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
237 OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
238 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
239 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
240 OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
241 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
242 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
243 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
244 template <bool IsSVEPrefetch = false>
245 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
246 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
247 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
248 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
249 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
250 template<bool AddFPZeroAsLiteral>
251 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
252 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
253 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
254 bool tryParseNeonVectorRegister(OperandVector &Operands);
255 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
256 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
257 template <bool ParseShiftExtend,
258 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
259 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
260 template <bool ParseShiftExtend, bool ParseSuffix>
261 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
262 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
263 template <RegKind VectorKind>
264 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
265 bool ExpectMatch = false);
266 OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
267 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
268 OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
269
270public:
271 enum AArch64MatchResultTy {
272 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
273#define GET_OPERAND_DIAGNOSTIC_TYPES
274#include "AArch64GenAsmMatcher.inc"
275 };
276 bool IsILP32;
277
278 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
279 const MCInstrInfo &MII, const MCTargetOptions &Options)
280 : MCTargetAsmParser(Options, STI, MII) {
281 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
282 MCAsmParserExtension::Initialize(Parser);
283 MCStreamer &S = getParser().getStreamer();
284 if (S.getTargetStreamer() == nullptr)
285 new AArch64TargetStreamer(S);
286
287 // Alias .hword/.word/.[dx]word to the target-independent
288 // .2byte/.4byte/.8byte directives as they have the same form and
289 // semantics:
290 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
291 Parser.addAliasForDirective(".hword", ".2byte");
292 Parser.addAliasForDirective(".word", ".4byte");
293 Parser.addAliasForDirective(".dword", ".8byte");
294 Parser.addAliasForDirective(".xword", ".8byte");
295
296 // Initialize the set of available features.
297 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
298 }
299
300 bool regsEqual(const MCParsedAsmOperand &Op1,
301 const MCParsedAsmOperand &Op2) const override;
302 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
303 SMLoc NameLoc, OperandVector &Operands) override;
304 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
305 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
306 SMLoc &EndLoc) override;
307 bool ParseDirective(AsmToken DirectiveID) override;
308 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
309 unsigned Kind) override;
310
311 static bool classifySymbolRef(const MCExpr *Expr,
312 AArch64MCExpr::VariantKind &ELFRefKind,
313 MCSymbolRefExpr::VariantKind &DarwinRefKind,
314 int64_t &Addend);
315};
316
317/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
318/// instruction.
319class AArch64Operand : public MCParsedAsmOperand {
320private:
321 enum KindTy {
322 k_Immediate,
323 k_ShiftedImm,
324 k_CondCode,
325 k_Register,
326 k_MatrixRegister,
327 k_MatrixTileList,
328 k_SVCR,
329 k_VectorList,
330 k_VectorIndex,
331 k_Token,
332 k_SysReg,
333 k_SysCR,
334 k_Prefetch,
335 k_ShiftExtend,
336 k_FPImm,
337 k_Barrier,
338 k_PSBHint,
339 k_BTIHint,
340 } Kind;
341
342 SMLoc StartLoc, EndLoc;
343
344 struct TokOp {
345 const char *Data;
346 unsigned Length;
347 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
348 };
349
350 // Separate shift/extend operand.
351 struct ShiftExtendOp {
352 AArch64_AM::ShiftExtendType Type;
353 unsigned Amount;
354 bool HasExplicitAmount;
355 };
356
357 struct RegOp {
358 unsigned RegNum;
359 RegKind Kind;
360 int ElementWidth;
361
362 // The register may be allowed as a different register class,
363 // e.g. for GPR64as32 or GPR32as64.
364 RegConstraintEqualityTy EqualityTy;
365
366 // In some cases the shift/extend needs to be explicitly parsed together
367 // with the register, rather than as a separate operand. This is needed
368 // for addressing modes where the instruction as a whole dictates the
369 // scaling/extend, rather than specific bits in the instruction.
370 // By parsing them as a single operand, we avoid the need to pass an
371 // extra operand in all CodeGen patterns (because all operands need to
372 // have an associated value), and we avoid the need to update TableGen to
373 // accept operands that have no associated bits in the instruction.
374 //
375 // An added benefit of parsing them together is that the assembler
376 // can give a sensible diagnostic if the scaling is not correct.
377 //
378 // The default is 'lsl #0' (HasExplicitAmount = false) if no
379 // ShiftExtend is specified.
380 ShiftExtendOp ShiftExtend;
381 };
382
383 struct MatrixRegOp {
384 unsigned RegNum;
385 unsigned ElementWidth;
386 MatrixKind Kind;
387 };
388
389 struct MatrixTileListOp {
390 unsigned RegMask = 0;
391 };
392
393 struct VectorListOp {
394 unsigned RegNum;
395 unsigned Count;
396 unsigned NumElements;
397 unsigned ElementWidth;
398 RegKind RegisterKind;
399 };
400
401 struct VectorIndexOp {
402 int Val;
403 };
404
405 struct ImmOp {
406 const MCExpr *Val;
407 };
408
409 struct ShiftedImmOp {
410 const MCExpr *Val;
411 unsigned ShiftAmount;
412 };
413
414 struct CondCodeOp {
415 AArch64CC::CondCode Code;
416 };
417
418 struct FPImmOp {
419 uint64_t Val; // APFloat value bitcasted to uint64_t.
420 bool IsExact; // describes whether parsed value was exact.
421 };
422
423 struct BarrierOp {
424 const char *Data;
425 unsigned Length;
426 unsigned Val; // Not the enum since not all values have names.
427 bool HasnXSModifier;
428 };
429
430 struct SysRegOp {
431 const char *Data;
432 unsigned Length;
433 uint32_t MRSReg;
434 uint32_t MSRReg;
435 uint32_t PStateField;
436 };
437
438 struct SysCRImmOp {
439 unsigned Val;
440 };
441
442 struct PrefetchOp {
443 const char *Data;
444 unsigned Length;
445 unsigned Val;
446 };
447
448 struct PSBHintOp {
449 const char *Data;
450 unsigned Length;
451 unsigned Val;
452 };
453
454 struct BTIHintOp {
455 const char *Data;
456 unsigned Length;
457 unsigned Val;
458 };
459
460 struct SVCROp {
461 const char *Data;
462 unsigned Length;
463 unsigned PStateField;
464 };
465
466 union {
467 struct TokOp Tok;
468 struct RegOp Reg;
469 struct MatrixRegOp MatrixReg;
470 struct MatrixTileListOp MatrixTileList;
471 struct VectorListOp VectorList;
472 struct VectorIndexOp VectorIndex;
473 struct ImmOp Imm;
474 struct ShiftedImmOp ShiftedImm;
475 struct CondCodeOp CondCode;
476 struct FPImmOp FPImm;
477 struct BarrierOp Barrier;
478 struct SysRegOp SysReg;
479 struct SysCRImmOp SysCRImm;
480 struct PrefetchOp Prefetch;
481 struct PSBHintOp PSBHint;
482 struct BTIHintOp BTIHint;
483 struct ShiftExtendOp ShiftExtend;
484 struct SVCROp SVCR;
485 };
486
487 // Keep the MCContext around as the MCExprs may need manipulated during
488 // the add<>Operands() calls.
489 MCContext &Ctx;
490
491public:
492 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
493
494 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
495 Kind = o.Kind;
496 StartLoc = o.StartLoc;
497 EndLoc = o.EndLoc;
498 switch (Kind) {
499 case k_Token:
500 Tok = o.Tok;
501 break;
502 case k_Immediate:
503 Imm = o.Imm;
504 break;
505 case k_ShiftedImm:
506 ShiftedImm = o.ShiftedImm;
507 break;
508 case k_CondCode:
509 CondCode = o.CondCode;
510 break;
511 case k_FPImm:
512 FPImm = o.FPImm;
513 break;
514 case k_Barrier:
515 Barrier = o.Barrier;
516 break;
517 case k_Register:
518 Reg = o.Reg;
519 break;
520 case k_MatrixRegister:
521 MatrixReg = o.MatrixReg;
522 break;
523 case k_MatrixTileList:
524 MatrixTileList = o.MatrixTileList;
525 break;
526 case k_VectorList:
527 VectorList = o.VectorList;
528 break;
529 case k_VectorIndex:
530 VectorIndex = o.VectorIndex;
531 break;
532 case k_SysReg:
533 SysReg = o.SysReg;
534 break;
535 case k_SysCR:
536 SysCRImm = o.SysCRImm;
537 break;
538 case k_Prefetch:
539 Prefetch = o.Prefetch;
540 break;
541 case k_PSBHint:
542 PSBHint = o.PSBHint;
543 break;
544 case k_BTIHint:
545 BTIHint = o.BTIHint;
546 break;
547 case k_ShiftExtend:
548 ShiftExtend = o.ShiftExtend;
549 break;
550 case k_SVCR:
551 SVCR = o.SVCR;
552 break;
553 }
554 }
555
556 /// getStartLoc - Get the location of the first token of this operand.
557 SMLoc getStartLoc() const override { return StartLoc; }
558 /// getEndLoc - Get the location of the last token of this operand.
559 SMLoc getEndLoc() const override { return EndLoc; }
560
561 StringRef getToken() const {
562 assert(Kind == k_Token && "Invalid access!")(static_cast<void> (0));
563 return StringRef(Tok.Data, Tok.Length);
564 }
565
566 bool isTokenSuffix() const {
567 assert(Kind == k_Token && "Invalid access!")(static_cast<void> (0));
568 return Tok.IsSuffix;
569 }
570
571 const MCExpr *getImm() const {
572 assert(Kind == k_Immediate && "Invalid access!")(static_cast<void> (0));
573 return Imm.Val;
574 }
575
576 const MCExpr *getShiftedImmVal() const {
577 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast<void> (0));
578 return ShiftedImm.Val;
579 }
580
581 unsigned getShiftedImmShift() const {
582 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast<void> (0));
583 return ShiftedImm.ShiftAmount;
584 }
585
586 AArch64CC::CondCode getCondCode() const {
587 assert(Kind == k_CondCode && "Invalid access!")(static_cast<void> (0));
588 return CondCode.Code;
589 }
590
591 APFloat getFPImm() const {
592 assert (Kind == k_FPImm && "Invalid access!")(static_cast<void> (0));
593 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
594 }
595
596 bool getFPImmIsExact() const {
597 assert (Kind == k_FPImm && "Invalid access!")(static_cast<void> (0));
598 return FPImm.IsExact;
599 }
600
601 unsigned getBarrier() const {
602 assert(Kind == k_Barrier && "Invalid access!")(static_cast<void> (0));
603 return Barrier.Val;
604 }
605
606 StringRef getBarrierName() const {
607 assert(Kind == k_Barrier && "Invalid access!")(static_cast<void> (0));
608 return StringRef(Barrier.Data, Barrier.Length);
609 }
610
611 bool getBarriernXSModifier() const {
612 assert(Kind == k_Barrier && "Invalid access!")(static_cast<void> (0));
613 return Barrier.HasnXSModifier;
614 }
615
616 unsigned getReg() const override {
617 assert(Kind == k_Register && "Invalid access!")(static_cast<void> (0));
618 return Reg.RegNum;
619 }
620
621 unsigned getMatrixReg() const {
622 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast<void> (0));
623 return MatrixReg.RegNum;
624 }
625
626 unsigned getMatrixElementWidth() const {
627 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast<void> (0));
628 return MatrixReg.ElementWidth;
629 }
630
631 MatrixKind getMatrixKind() const {
632 assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast<void> (0));
633 return MatrixReg.Kind;
634 }
635
636 unsigned getMatrixTileListRegMask() const {
637 assert(isMatrixTileList() && "Invalid access!")(static_cast<void> (0));
638 return MatrixTileList.RegMask;
639 }
640
641 RegConstraintEqualityTy getRegEqualityTy() const {
642 assert(Kind == k_Register && "Invalid access!")(static_cast<void> (0));
643 return Reg.EqualityTy;
644 }
645
646 unsigned getVectorListStart() const {
647 assert(Kind == k_VectorList && "Invalid access!")(static_cast<void> (0));
648 return VectorList.RegNum;
649 }
650
651 unsigned getVectorListCount() const {
652 assert(Kind == k_VectorList && "Invalid access!")(static_cast<void> (0));
653 return VectorList.Count;
654 }
655
656 int getVectorIndex() const {
657 assert(Kind == k_VectorIndex && "Invalid access!")(static_cast<void> (0));
658 return VectorIndex.Val;
659 }
660
661 StringRef getSysReg() const {
662 assert(Kind == k_SysReg && "Invalid access!")(static_cast<void> (0));
663 return StringRef(SysReg.Data, SysReg.Length);
664 }
665
666 unsigned getSysCR() const {
667 assert(Kind == k_SysCR && "Invalid access!")(static_cast<void> (0));
668 return SysCRImm.Val;
669 }
670
671 unsigned getPrefetch() const {
672 assert(Kind == k_Prefetch && "Invalid access!")(static_cast<void> (0));
673 return Prefetch.Val;
674 }
675
676 unsigned getPSBHint() const {
677 assert(Kind == k_PSBHint && "Invalid access!")(static_cast<void> (0));
678 return PSBHint.Val;
679 }
680
681 StringRef getPSBHintName() const {
682 assert(Kind == k_PSBHint && "Invalid access!")(static_cast<void> (0));
683 return StringRef(PSBHint.Data, PSBHint.Length);
684 }
685
686 unsigned getBTIHint() const {
687 assert(Kind == k_BTIHint && "Invalid access!")(static_cast<void> (0));
688 return BTIHint.Val;
689 }
690
691 StringRef getBTIHintName() const {
692 assert(Kind == k_BTIHint && "Invalid access!")(static_cast<void> (0));
693 return StringRef(BTIHint.Data, BTIHint.Length);
694 }
695
696 StringRef getSVCR() const {
697 assert(Kind == k_SVCR && "Invalid access!")(static_cast<void> (0));
698 return StringRef(SVCR.Data, SVCR.Length);
699 }
700
701 StringRef getPrefetchName() const {
702 assert(Kind == k_Prefetch && "Invalid access!")(static_cast<void> (0));
703 return StringRef(Prefetch.Data, Prefetch.Length);
704 }
705
706 AArch64_AM::ShiftExtendType getShiftExtendType() const {
707 if (Kind == k_ShiftExtend)
708 return ShiftExtend.Type;
709 if (Kind == k_Register)
710 return Reg.ShiftExtend.Type;
711 llvm_unreachable("Invalid access!")__builtin_unreachable();
712 }
713
714 unsigned getShiftExtendAmount() const {
715 if (Kind == k_ShiftExtend)
716 return ShiftExtend.Amount;
717 if (Kind == k_Register)
718 return Reg.ShiftExtend.Amount;
719 llvm_unreachable("Invalid access!")__builtin_unreachable();
720 }
721
722 bool hasShiftExtendAmount() const {
723 if (Kind == k_ShiftExtend)
724 return ShiftExtend.HasExplicitAmount;
725 if (Kind == k_Register)
726 return Reg.ShiftExtend.HasExplicitAmount;
727 llvm_unreachable("Invalid access!")__builtin_unreachable();
728 }
729
730 bool isImm() const override { return Kind == k_Immediate; }
731 bool isMem() const override { return false; }
732
733 bool isUImm6() const {
734 if (!isImm())
735 return false;
736 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 if (!MCE)
738 return false;
739 int64_t Val = MCE->getValue();
740 return (Val >= 0 && Val < 64);
741 }
742
743 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
744
745 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
746 return isImmScaled<Bits, Scale>(true);
747 }
748
749 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
750 return isImmScaled<Bits, Scale>(false);
751 }
752
753 template <int Bits, int Scale>
754 DiagnosticPredicate isImmScaled(bool Signed) const {
755 if (!isImm())
756 return DiagnosticPredicateTy::NoMatch;
757
758 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
759 if (!MCE)
760 return DiagnosticPredicateTy::NoMatch;
761
762 int64_t MinVal, MaxVal;
763 if (Signed) {
764 int64_t Shift = Bits - 1;
765 MinVal = (int64_t(1) << Shift) * -Scale;
766 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
767 } else {
768 MinVal = 0;
769 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
770 }
771
772 int64_t Val = MCE->getValue();
773 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
774 return DiagnosticPredicateTy::Match;
775
776 return DiagnosticPredicateTy::NearMatch;
777 }
778
779 DiagnosticPredicate isSVEPattern() const {
780 if (!isImm())
781 return DiagnosticPredicateTy::NoMatch;
782 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
783 if (!MCE)
784 return DiagnosticPredicateTy::NoMatch;
785 int64_t Val = MCE->getValue();
786 if (Val >= 0 && Val < 32)
787 return DiagnosticPredicateTy::Match;
788 return DiagnosticPredicateTy::NearMatch;
789 }
790
791 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
792 AArch64MCExpr::VariantKind ELFRefKind;
793 MCSymbolRefExpr::VariantKind DarwinRefKind;
794 int64_t Addend;
795 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
796 Addend)) {
797 // If we don't understand the expression, assume the best and
798 // let the fixup and relocation code deal with it.
799 return true;
800 }
801
802 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
803 ELFRefKind == AArch64MCExpr::VK_LO12 ||
804 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
805 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
806 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
807 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
808 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
809 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
810 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
811 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
812 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
813 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
814 // Note that we don't range-check the addend. It's adjusted modulo page
815 // size when converted, so there is no "out of range" condition when using
816 // @pageoff.
817 return true;
818 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
819 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
820 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
821 return Addend == 0;
822 }
823
824 return false;
825 }
826
827 template <int Scale> bool isUImm12Offset() const {
828 if (!isImm())
829 return false;
830
831 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
832 if (!MCE)
833 return isSymbolicUImm12Offset(getImm());
834
835 int64_t Val = MCE->getValue();
836 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
837 }
838
839 template <int N, int M>
840 bool isImmInRange() const {
841 if (!isImm())
842 return false;
843 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
844 if (!MCE)
845 return false;
846 int64_t Val = MCE->getValue();
847 return (Val >= N && Val <= M);
848 }
849
850 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
851 // a logical immediate can always be represented when inverted.
852 template <typename T>
853 bool isLogicalImm() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
857 if (!MCE)
858 return false;
859
860 int64_t Val = MCE->getValue();
861 // Avoid left shift by 64 directly.
862 uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4);
863 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
864 if ((Val & Upper) && (Val & Upper) != Upper)
865 return false;
866
867 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
868 }
869
  // True iff the operand was parsed in the "#imm, lsl #shift" form.
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        // A non-zero constant whose low 'Width' bits are all zero can be
        // re-expressed as (Val >> Width, Width); otherwise use it unshifted.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }

  // True iff the operand is valid as the immediate of an ADD/SUB (immediate)
  // instruction: a low-12-bits symbol reference, or a constant expressible as
  // a 12-bit value optionally shifted left by 12.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      // Only these relocation modifiers can be materialized by an ADD/SUB.
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  // True iff the operand is the negation of a valid ADD/SUB immediate
  // (used to flip e.g. an ADD with a negative constant onto a SUB).
  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }
948
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    // Byte-sized elements may not take the 'lsl #8' shifted form.
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    // Byte-sized elements may not take the 'lsl #8' shifted form.
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // A logical immediate that is NOT also representable as an SVE CPY
  // immediate; lets the matcher prefer the CPY form when both encodings fit.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  // True iff the operand is a parsed condition code (eq, ne, ...).
  bool isCondCode() const { return Kind == k_CondCode; }

  // True iff the constant is encodable as an AdvSIMD modified immediate of
  // type 10.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  // True iff the operand is usable as an N-bit, word-aligned PC-relative
  // branch target. Non-constant expressions are accepted and deferred to the
  // fixup machinery.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Targets must be 4-byte aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1018
1019 bool
1020 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1021 if (!isImm())
1022 return false;
1023
1024 AArch64MCExpr::VariantKind ELFRefKind;
1025 MCSymbolRefExpr::VariantKind DarwinRefKind;
1026 int64_t Addend;
1027 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1028 DarwinRefKind, Addend)) {
1029 return false;
1030 }
1031 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1032 return false;
1033
1034 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
1035 if (ELFRefKind == AllowedModifiers[i])
1036 return true;
1037 }
1038
1039 return false;
1040 }
1041
  // The isMovWSymbolG<n> predicates accept the relocation modifiers that
  // select 16-bit chunk <n> of a symbol's value for MOVZ/MOVK-style
  // instructions (:abs_gN:, :prel_gN:, and TLS variants).

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1071
  // True iff the operand can be matched by the MOVZ-backed MOV alias for a
  // RegWidth-bit register with the given shift. A non-constant expression is
  // only accepted when Shift is 0.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }

  // True iff the operand can be matched by the MOVN-backed MOV alias; unlike
  // the MOVZ case, only constants are accepted.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  // FP immediate representable in the 8-bit FMOV immediate encoding
  // (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier operand without the nXS modifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS modifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  // System register with a valid (non--1U) MRS read encoding.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register with a valid MSR write encoding.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PState fields whose MSR-immediate form takes only a 1-bit value.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  // Every other valid PState field takes a 4-bit immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  // SVCR operand with a valid PState field encoding.
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  // Scalar (GPR/FPR) register, as opposed to any vector register kind.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the "_lo" subclasses of the FP
  // register file.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1163
  // True iff this is an SVE register belonging to register class 'Class'.
  // The class ID determines whether a data (Z) or predicate (P) register
  // kind is required.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  // Scalar FP register used where a Z register is expected; 'Class' selects
  // the acceptable scalar register class.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  // SVE predicate register with the given element width. NearMatch lets the
  // matcher produce a width-specific diagnostic.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // SVE data register with the given element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // SVE data register carrying a shift/extend of the expected type and
  // amount (amount must be log2 of the access size in bytes).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // 64-bit GPR matched where the instruction encodes the corresponding
  // 32-bit register (see addGPR32as64Operands).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // 32-bit GPR matched where the instruction encodes the corresponding
  // 64-bit register (see addGPR64as32Operands).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Member of the GPR64x8 tuple register class.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Sequential W-register pair (e.g. for CASP).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Sequential X-register pair.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Rotation immediate for complex-number instructions: must satisfy
  // Value % Angle == Remainder and not exceed 270.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  // Scalar register belonging to the given 64-bit GPR register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // 64-bit GPR carrying an explicit 'lsl #log2(ExtWidth/8)' scaling.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1290
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  // Vector list with an explicit element type: register kind, register
  // count, element width and element count must all agree.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  // Vector element index within the inclusive range [Min, Max].
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    // A shifter (as opposed to an extend) is LSL, LSR, ASR, ROR or MSL.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  // FP immediate bitwise-equal to the table value named by ImmEnum.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  // Accept either of two exact FP immediates.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  // Extend operand (uxtb/sxtb/.../uxtx/sxtx or lsl) with amount 0-4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extends taking a 64-bit source register: uxtx/sxtx or plain lsl.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // X-register offset extend for a Width-bit access: amount must be 0
  // (unscaled) or log2 of the access size in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // W-register offset extend for a Width-bit access.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1425
  // LSL/LSR/ASR with an amount strictly less than the register width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // LSL/LSR/ASR/ROR with an amount strictly less than the register width.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1502
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

  // ADRP label: a page-aligned (4096-byte) constant within the signed
  // 21-page range, or any non-constant expression.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  // ADR label: a constant within the signed 21-bit byte range, or any
  // non-constant expression.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  // Matrix register of the expected kind, element size and register class.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1555
1556 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1557 // Add as immediates when possible. Null MCExpr = 0.
1558 if (!Expr)
1559 Inst.addOperand(MCOperand::createImm(0));
1560 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1561 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1562 else
1563 Inst.addOperand(MCOperand::createExpr(Expr));
1564 }
1565
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  // Emit the GPR32 register with the same encoding as the parsed GPR64.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Emit the GPR64 register with the same encoding as the parsed GPR32.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Emit the Z register with the same index as the parsed Width-bit scalar
  // FP register (e.g. d3 -> z3 for Width == 64).
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8: Base = AArch64::B0; break;
    case 16: Base = AArch64::H0; break;
    case 32: Base = AArch64::S0; break;
    case 64: Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  // Emit the D register with the same index as the parsed Q register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Row selector for the FirstRegs table in addVectorListOperands.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };
1639
  // Emit the tuple register representing a list of NumRegs consecutive
  // D, Q or Z registers, rebased from the list's first register.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FirstRegs[RegTy][NumRegs] is the length-NumRegs tuple starting at
    // D0/Q0/Z0; index 0 holds the base register used for rebasing.
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0, AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0, AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0, AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                    FirstRegs[(unsigned)RegTy][0]));
  }

  // The tile list is encoded directly as its 8-bit register mask.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // Emit 0/1 selecting which of the two allowed exact FP immediates matched.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  // Emit an (imm, shift) pair, re-scaling a plain constant into the
  // 'lsl #Shift' form when it is representable that way.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  // As above, but emits the negated immediate; only valid for operands that
  // passed isAddSubImmNeg-style matching.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  // ADRP encodes a page delta, so drop the low 12 bits of a constant target.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  // Scaled unsigned 12-bit offsets are encoded divided by the access size.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  // Emit the hardware encoding of a logical immediate of width sizeof(T)*8.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Same, but encodes the bitwise NOT of the parsed value.
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
1796
1797 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1798 // Branch operands don't encode the low bits, so shift them off
1799 // here. If it's a label, however, just put it on directly as there's
1800 // not enough information now to do anything.
1801 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1802 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1803 if (!MCE) {
1804 addExpr(Inst, getImm());
1805 return;
1806 }
1807 assert(MCE && "Invalid constant immediate operand!")(static_cast<void> (0));
1808 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1809 }
1810
1811 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1812 // Branch operands don't encode the low bits, so shift them off
1813 // here. If it's a label, however, just put it on directly as there's
1814 // not enough information now to do anything.
1815 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1816 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1817 if (!MCE) {
1818 addExpr(Inst, getImm());
1819 return;
1820 }
1821 assert(MCE && "Invalid constant immediate operand!")(static_cast<void> (0));
1822 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1823 }
1824
1825 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1826 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1827 Inst.addOperand(MCOperand::createImm(
1828 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1829 }
1830
1831 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1832 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1833 Inst.addOperand(MCOperand::createImm(getBarrier()));
1834 }
1835
1836 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1837 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1838 Inst.addOperand(MCOperand::createImm(getBarrier()));
1839 }
1840
1841 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1842 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1843
1844 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1845 }
1846
1847 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1848 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1849
1850 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1851 }
1852
1853 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1854 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1855
1856 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1857 }
1858
1859 void addSVCROperands(MCInst &Inst, unsigned N) const {
1860 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1861
1862 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1863 }
1864
1865 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1867
1868 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1869 }
1870
1871 void addSysCROperands(MCInst &Inst, unsigned N) const {
1872 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1873 Inst.addOperand(MCOperand::createImm(getSysCR()));
1874 }
1875
1876 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1877 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1878 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1879 }
1880
1881 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1882 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1883 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1884 }
1885
1886 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1887 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1888 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1889 }
1890
1891 void addShifterOperands(MCInst &Inst, unsigned N) const {
1892 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1893 unsigned Imm =
1894 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1895 Inst.addOperand(MCOperand::createImm(Imm));
1896 }
1897
1898 void addExtendOperands(MCInst &Inst, unsigned N) const {
1899 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1900 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1901 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1902 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1903 Inst.addOperand(MCOperand::createImm(Imm));
1904 }
1905
1906 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1907 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1908 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1909 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1910 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1911 Inst.addOperand(MCOperand::createImm(Imm));
1912 }
1913
1914 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1915 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
1916 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1917 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1918 Inst.addOperand(MCOperand::createImm(IsSigned));
1919 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1920 }
1921
1922 // For 8-bit load/store instructions with a register offset, both the
1923 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1924 // they're disambiguated by whether the shift was explicit or implicit rather
1925 // than its size.
1926 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1927 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
1928 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1929 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1930 Inst.addOperand(MCOperand::createImm(IsSigned));
1931 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1932 }
1933
1934 template<int Shift>
1935 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1936 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1937
1938 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1939 if (CE) {
1940 uint64_t Value = CE->getValue();
1941 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1942 } else {
1943 addExpr(Inst, getImm());
1944 }
1945 }
1946
1947 template<int Shift>
1948 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1949 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1950
1951 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1952 uint64_t Value = CE->getValue();
1953 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1954 }
1955
1956 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1957 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1958 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1959 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1960 }
1961
1962 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1963 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
1964 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1965 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1966 }
1967
1968 void print(raw_ostream &OS) const override;
1969
1970 static std::unique_ptr<AArch64Operand>
1971 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1972 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1973 Op->Tok.Data = Str.data();
1974 Op->Tok.Length = Str.size();
1975 Op->Tok.IsSuffix = IsSuffix;
1976 Op->StartLoc = S;
1977 Op->EndLoc = S;
1978 return Op;
1979 }
1980
1981 static std::unique_ptr<AArch64Operand>
1982 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1983 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1984 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1985 unsigned ShiftAmount = 0,
1986 unsigned HasExplicitAmount = false) {
1987 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1988 Op->Reg.RegNum = RegNum;
1989 Op->Reg.Kind = Kind;
1990 Op->Reg.ElementWidth = 0;
1991 Op->Reg.EqualityTy = EqTy;
1992 Op->Reg.ShiftExtend.Type = ExtTy;
1993 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1994 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1995 Op->StartLoc = S;
1996 Op->EndLoc = E;
1997 return Op;
1998 }
1999
2000 static std::unique_ptr<AArch64Operand>
2001 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2002 SMLoc S, SMLoc E, MCContext &Ctx,
2003 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2004 unsigned ShiftAmount = 0,
2005 unsigned HasExplicitAmount = false) {
2006 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(static_cast<void> (0))
2007 Kind == RegKind::SVEPredicateVector) &&(static_cast<void> (0))
2008 "Invalid vector kind")(static_cast<void> (0));
2009 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2010 HasExplicitAmount);
2011 Op->Reg.ElementWidth = ElementWidth;
2012 return Op;
2013 }
2014
2015 static std::unique_ptr<AArch64Operand>
2016 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
2017 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
2018 MCContext &Ctx) {
2019 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2020 Op->VectorList.RegNum = RegNum;
2021 Op->VectorList.Count = Count;
2022 Op->VectorList.NumElements = NumElements;
2023 Op->VectorList.ElementWidth = ElementWidth;
2024 Op->VectorList.RegisterKind = RegisterKind;
2025 Op->StartLoc = S;
2026 Op->EndLoc = E;
2027 return Op;
2028 }
2029
2030 static std::unique_ptr<AArch64Operand>
2031 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2032 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2033 Op->VectorIndex.Val = Idx;
2034 Op->StartLoc = S;
2035 Op->EndLoc = E;
2036 return Op;
2037 }
2038
2039 static std::unique_ptr<AArch64Operand>
2040 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2041 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2042 Op->MatrixTileList.RegMask = RegMask;
2043 Op->StartLoc = S;
2044 Op->EndLoc = E;
2045 return Op;
2046 }
2047
2048 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2049 const unsigned ElementWidth) {
2050 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2051 RegMap = {
2052 {{0, AArch64::ZAB0},
2053 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2054 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2055 {{8, AArch64::ZAB0},
2056 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2057 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2058 {{16, AArch64::ZAH0},
2059 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2060 {{16, AArch64::ZAH1},
2061 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2062 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2063 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2064 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2065 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2066 };
2067
2068 if (ElementWidth == 64)
2069 OutRegs.insert(Reg);
2070 else {
2071 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2072 assert(!Regs.empty() && "Invalid tile or element width!")(static_cast<void> (0));
2073 for (auto OutReg : Regs)
2074 OutRegs.insert(OutReg);
2075 }
2076 }
2077
2078 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2079 SMLoc E, MCContext &Ctx) {
2080 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2081 Op->Imm.Val = Val;
2082 Op->StartLoc = S;
2083 Op->EndLoc = E;
2084 return Op;
2085 }
2086
2087 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2088 unsigned ShiftAmount,
2089 SMLoc S, SMLoc E,
2090 MCContext &Ctx) {
2091 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2092 Op->ShiftedImm .Val = Val;
2093 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2094 Op->StartLoc = S;
2095 Op->EndLoc = E;
2096 return Op;
2097 }
2098
2099 static std::unique_ptr<AArch64Operand>
2100 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2101 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2102 Op->CondCode.Code = Code;
2103 Op->StartLoc = S;
2104 Op->EndLoc = E;
2105 return Op;
2106 }
2107
2108 static std::unique_ptr<AArch64Operand>
2109 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2110 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2111 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2112 Op->FPImm.IsExact = IsExact;
2113 Op->StartLoc = S;
2114 Op->EndLoc = S;
2115 return Op;
2116 }
2117
2118 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2119 StringRef Str,
2120 SMLoc S,
2121 MCContext &Ctx,
2122 bool HasnXSModifier) {
2123 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2124 Op->Barrier.Val = Val;
2125 Op->Barrier.Data = Str.data();
2126 Op->Barrier.Length = Str.size();
2127 Op->Barrier.HasnXSModifier = HasnXSModifier;
2128 Op->StartLoc = S;
2129 Op->EndLoc = S;
2130 return Op;
2131 }
2132
2133 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2134 uint32_t MRSReg,
2135 uint32_t MSRReg,
2136 uint32_t PStateField,
2137 MCContext &Ctx) {
2138 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2139 Op->SysReg.Data = Str.data();
2140 Op->SysReg.Length = Str.size();
2141 Op->SysReg.MRSReg = MRSReg;
2142 Op->SysReg.MSRReg = MSRReg;
2143 Op->SysReg.PStateField = PStateField;
2144 Op->StartLoc = S;
2145 Op->EndLoc = S;
2146 return Op;
2147 }
2148
2149 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2150 SMLoc E, MCContext &Ctx) {
2151 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2152 Op->SysCRImm.Val = Val;
2153 Op->StartLoc = S;
2154 Op->EndLoc = E;
2155 return Op;
2156 }
2157
2158 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2159 StringRef Str,
2160 SMLoc S,
2161 MCContext &Ctx) {
2162 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2163 Op->Prefetch.Val = Val;
2164 Op->Barrier.Data = Str.data();
2165 Op->Barrier.Length = Str.size();
2166 Op->StartLoc = S;
2167 Op->EndLoc = S;
2168 return Op;
2169 }
2170
2171 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2172 StringRef Str,
2173 SMLoc S,
2174 MCContext &Ctx) {
2175 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2176 Op->PSBHint.Val = Val;
2177 Op->PSBHint.Data = Str.data();
2178 Op->PSBHint.Length = Str.size();
2179 Op->StartLoc = S;
2180 Op->EndLoc = S;
2181 return Op;
2182 }
2183
2184 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2185 StringRef Str,
2186 SMLoc S,
2187 MCContext &Ctx) {
2188 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2189 Op->BTIHint.Val = Val | 32;
2190 Op->BTIHint.Data = Str.data();
2191 Op->BTIHint.Length = Str.size();
2192 Op->StartLoc = S;
2193 Op->EndLoc = S;
2194 return Op;
2195 }
2196
2197 static std::unique_ptr<AArch64Operand>
2198 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2199 SMLoc S, SMLoc E, MCContext &Ctx) {
2200 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2201 Op->MatrixReg.RegNum = RegNum;
2202 Op->MatrixReg.ElementWidth = ElementWidth;
2203 Op->MatrixReg.Kind = Kind;
2204 Op->StartLoc = S;
2205 Op->EndLoc = E;
2206 return Op;
2207 }
2208
2209 static std::unique_ptr<AArch64Operand>
2210 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2211 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2212 Op->SVCR.PStateField = PStateField;
2213 Op->SVCR.Data = Str.data();
2214 Op->SVCR.Length = Str.size();
2215 Op->StartLoc = S;
2216 Op->EndLoc = S;
2217 return Op;
2218 }
2219
2220 static std::unique_ptr<AArch64Operand>
2221 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2222 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2223 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2224 Op->ShiftExtend.Type = ShOp;
2225 Op->ShiftExtend.Amount = Val;
2226 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2227 Op->StartLoc = S;
2228 Op->EndLoc = E;
2229 return Op;
2230 }
2231};
2232
2233} // end anonymous namespace.
2234
2235void AArch64Operand::print(raw_ostream &OS) const {
2236 switch (Kind) {
2237 case k_FPImm:
2238 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2239 if (!getFPImmIsExact())
2240 OS << " (inexact)";
2241 OS << ">";
2242 break;
2243 case k_Barrier: {
2244 StringRef Name = getBarrierName();
2245 if (!Name.empty())
2246 OS << "<barrier " << Name << ">";
2247 else
2248 OS << "<barrier invalid #" << getBarrier() << ">";
2249 break;
2250 }
2251 case k_Immediate:
2252 OS << *getImm();
2253 break;
2254 case k_ShiftedImm: {
2255 unsigned Shift = getShiftedImmShift();
2256 OS << "<shiftedimm ";
2257 OS << *getShiftedImmVal();
2258 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2259 break;
2260 }
2261 case k_CondCode:
2262 OS << "<condcode " << getCondCode() << ">";
2263 break;
2264 case k_VectorList: {
2265 OS << "<vectorlist ";
2266 unsigned Reg = getVectorListStart();
2267 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2268 OS << Reg + i << " ";
2269 OS << ">";
2270 break;
2271 }
2272 case k_VectorIndex:
2273 OS << "<vectorindex " << getVectorIndex() << ">";
2274 break;
2275 case k_SysReg:
2276 OS << "<sysreg: " << getSysReg() << '>';
2277 break;
2278 case k_Token:
2279 OS << "'" << getToken() << "'";
2280 break;
2281 case k_SysCR:
2282 OS << "c" << getSysCR();
2283 break;
2284 case k_Prefetch: {
2285 StringRef Name = getPrefetchName();
2286 if (!Name.empty())
2287 OS << "<prfop " << Name << ">";
2288 else
2289 OS << "<prfop invalid #" << getPrefetch() << ">";
2290 break;
2291 }
2292 case k_PSBHint:
2293 OS << getPSBHintName();
2294 break;
2295 case k_BTIHint:
2296 OS << getBTIHintName();
2297 break;
2298 case k_MatrixRegister:
2299 OS << "<matrix " << getMatrixReg() << ">";
2300 break;
2301 case k_MatrixTileList: {
2302 OS << "<matrixlist ";
2303 unsigned RegMask = getMatrixTileListRegMask();
2304 unsigned MaxBits = 8;
2305 for (unsigned I = MaxBits; I > 0; --I)
2306 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2307 OS << '>';
2308 break;
2309 }
2310 case k_SVCR: {
2311 OS << getSVCR();
2312 break;
2313 }
2314 case k_Register:
2315 OS << "<register " << getReg() << ">";
2316 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2317 break;
2318 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2319 case k_ShiftExtend:
2320 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2321 << getShiftExtendAmount();
2322 if (!hasShiftExtendAmount())
2323 OS << "<imp>";
2324 OS << '>';
2325 break;
2326 }
2327}
2328
2329/// @name Auto-generated Match Functions
2330/// {
2331
2332static unsigned MatchRegisterName(StringRef Name);
2333
2334/// }
2335
2336static unsigned MatchNeonVectorRegName(StringRef Name) {
2337 return StringSwitch<unsigned>(Name.lower())
2338 .Case("v0", AArch64::Q0)
2339 .Case("v1", AArch64::Q1)
2340 .Case("v2", AArch64::Q2)
2341 .Case("v3", AArch64::Q3)
2342 .Case("v4", AArch64::Q4)
2343 .Case("v5", AArch64::Q5)
2344 .Case("v6", AArch64::Q6)
2345 .Case("v7", AArch64::Q7)
2346 .Case("v8", AArch64::Q8)
2347 .Case("v9", AArch64::Q9)
2348 .Case("v10", AArch64::Q10)
2349 .Case("v11", AArch64::Q11)
2350 .Case("v12", AArch64::Q12)
2351 .Case("v13", AArch64::Q13)
2352 .Case("v14", AArch64::Q14)
2353 .Case("v15", AArch64::Q15)
2354 .Case("v16", AArch64::Q16)
2355 .Case("v17", AArch64::Q17)
2356 .Case("v18", AArch64::Q18)
2357 .Case("v19", AArch64::Q19)
2358 .Case("v20", AArch64::Q20)
2359 .Case("v21", AArch64::Q21)
2360 .Case("v22", AArch64::Q22)
2361 .Case("v23", AArch64::Q23)
2362 .Case("v24", AArch64::Q24)
2363 .Case("v25", AArch64::Q25)
2364 .Case("v26", AArch64::Q26)
2365 .Case("v27", AArch64::Q27)
2366 .Case("v28", AArch64::Q28)
2367 .Case("v29", AArch64::Q29)
2368 .Case("v30", AArch64::Q30)
2369 .Case("v31", AArch64::Q31)
2370 .Default(0);
2371}
2372
2373/// Returns an optional pair of (#elements, element-width) if Suffix
2374/// is a valid vector kind. Where the number of elements in a vector
2375/// or the vector width is implicit or explicitly unknown (but still a
2376/// valid suffix kind), 0 is used.
2377static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2378 RegKind VectorKind) {
2379 std::pair<int, int> Res = {-1, -1};
2380
2381 switch (VectorKind) {
2382 case RegKind::NeonVector:
2383 Res =
2384 StringSwitch<std::pair<int, int>>(Suffix.lower())
2385 .Case("", {0, 0})
2386 .Case(".1d", {1, 64})
2387 .Case(".1q", {1, 128})
2388 // '.2h' needed for fp16 scalar pairwise reductions
2389 .Case(".2h", {2, 16})
2390 .Case(".2s", {2, 32})
2391 .Case(".2d", {2, 64})
2392 // '.4b' is another special case for the ARMv8.2a dot product
2393 // operand
2394 .Case(".4b", {4, 8})
2395 .Case(".4h", {4, 16})
2396 .Case(".4s", {4, 32})
2397 .Case(".8b", {8, 8})
2398 .Case(".8h", {8, 16})
2399 .Case(".16b", {16, 8})
2400 // Accept the width neutral ones, too, for verbose syntax. If those
2401 // aren't used in the right places, the token operand won't match so
2402 // all will work out.
2403 .Case(".b", {0, 8})
2404 .Case(".h", {0, 16})
2405 .Case(".s", {0, 32})
2406 .Case(".d", {0, 64})
2407 .Default({-1, -1});
2408 break;
2409 case RegKind::SVEPredicateVector:
2410 case RegKind::SVEDataVector:
2411 case RegKind::Matrix:
2412 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2413 .Case("", {0, 0})
2414 .Case(".b", {0, 8})
2415 .Case(".h", {0, 16})
2416 .Case(".s", {0, 32})
2417 .Case(".d", {0, 64})
2418 .Case(".q", {0, 128})
2419 .Default({-1, -1});
2420 break;
2421 default:
2422 llvm_unreachable("Unsupported RegKind")__builtin_unreachable();
2423 }
2424
2425 if (Res == std::make_pair(-1, -1))
2426 return Optional<std::pair<int, int>>();
2427
2428 return Optional<std::pair<int, int>>(Res);
2429}
2430
2431static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2432 return parseVectorKind(Suffix, VectorKind).hasValue();
2433}
2434
2435static unsigned matchSVEDataVectorRegName(StringRef Name) {
2436 return StringSwitch<unsigned>(Name.lower())
2437 .Case("z0", AArch64::Z0)
2438 .Case("z1", AArch64::Z1)
2439 .Case("z2", AArch64::Z2)
2440 .Case("z3", AArch64::Z3)
2441 .Case("z4", AArch64::Z4)
2442 .Case("z5", AArch64::Z5)
2443 .Case("z6", AArch64::Z6)
2444 .Case("z7", AArch64::Z7)
2445 .Case("z8", AArch64::Z8)
2446 .Case("z9", AArch64::Z9)
2447 .Case("z10", AArch64::Z10)
2448 .Case("z11", AArch64::Z11)
2449 .Case("z12", AArch64::Z12)
2450 .Case("z13", AArch64::Z13)
2451 .Case("z14", AArch64::Z14)
2452 .Case("z15", AArch64::Z15)
2453 .Case("z16", AArch64::Z16)
2454 .Case("z17", AArch64::Z17)
2455 .Case("z18", AArch64::Z18)
2456 .Case("z19", AArch64::Z19)
2457 .Case("z20", AArch64::Z20)
2458 .Case("z21", AArch64::Z21)
2459 .Case("z22", AArch64::Z22)
2460 .Case("z23", AArch64::Z23)
2461 .Case("z24", AArch64::Z24)
2462 .Case("z25", AArch64::Z25)
2463 .Case("z26", AArch64::Z26)
2464 .Case("z27", AArch64::Z27)
2465 .Case("z28", AArch64::Z28)
2466 .Case("z29", AArch64::Z29)
2467 .Case("z30", AArch64::Z30)
2468 .Case("z31", AArch64::Z31)
2469 .Default(0);
2470}
2471
2472static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2473 return StringSwitch<unsigned>(Name.lower())
2474 .Case("p0", AArch64::P0)
2475 .Case("p1", AArch64::P1)
2476 .Case("p2", AArch64::P2)
2477 .Case("p3", AArch64::P3)
2478 .Case("p4", AArch64::P4)
2479 .Case("p5", AArch64::P5)
2480 .Case("p6", AArch64::P6)
2481 .Case("p7", AArch64::P7)
2482 .Case("p8", AArch64::P8)
2483 .Case("p9", AArch64::P9)
2484 .Case("p10", AArch64::P10)
2485 .Case("p11", AArch64::P11)
2486 .Case("p12", AArch64::P12)
2487 .Case("p13", AArch64::P13)
2488 .Case("p14", AArch64::P14)
2489 .Case("p15", AArch64::P15)
2490 .Default(0);
2491}
2492
2493static unsigned matchMatrixTileListRegName(StringRef Name) {
2494 return StringSwitch<unsigned>(Name.lower())
2495 .Case("za0.d", AArch64::ZAD0)
2496 .Case("za1.d", AArch64::ZAD1)
2497 .Case("za2.d", AArch64::ZAD2)
2498 .Case("za3.d", AArch64::ZAD3)
2499 .Case("za4.d", AArch64::ZAD4)
2500 .Case("za5.d", AArch64::ZAD5)
2501 .Case("za6.d", AArch64::ZAD6)
2502 .Case("za7.d", AArch64::ZAD7)
2503 .Case("za0.s", AArch64::ZAS0)
2504 .Case("za1.s", AArch64::ZAS1)
2505 .Case("za2.s", AArch64::ZAS2)
2506 .Case("za3.s", AArch64::ZAS3)
2507 .Case("za0.h", AArch64::ZAH0)
2508 .Case("za1.h", AArch64::ZAH1)
2509 .Case("za0.b", AArch64::ZAB0)
2510 .Default(0);
2511}
2512
2513static unsigned matchMatrixRegName(StringRef Name) {
2514 return StringSwitch<unsigned>(Name.lower())
2515 .Case("za", AArch64::ZA)
2516 .Case("za0.q", AArch64::ZAQ0)
2517 .Case("za1.q", AArch64::ZAQ1)
2518 .Case("za2.q", AArch64::ZAQ2)
2519 .Case("za3.q", AArch64::ZAQ3)
2520 .Case("za4.q", AArch64::ZAQ4)
2521 .Case("za5.q", AArch64::ZAQ5)
2522 .Case("za6.q", AArch64::ZAQ6)
2523 .Case("za7.q", AArch64::ZAQ7)
2524 .Case("za8.q", AArch64::ZAQ8)
2525 .Case("za9.q", AArch64::ZAQ9)
2526 .Case("za10.q", AArch64::ZAQ10)
2527 .Case("za11.q", AArch64::ZAQ11)
2528 .Case("za12.q", AArch64::ZAQ12)
2529 .Case("za13.q", AArch64::ZAQ13)
2530 .Case("za14.q", AArch64::ZAQ14)
2531 .Case("za15.q", AArch64::ZAQ15)
2532 .Case("za0.d", AArch64::ZAD0)
2533 .Case("za1.d", AArch64::ZAD1)
2534 .Case("za2.d", AArch64::ZAD2)
2535 .Case("za3.d", AArch64::ZAD3)
2536 .Case("za4.d", AArch64::ZAD4)
2537 .Case("za5.d", AArch64::ZAD5)
2538 .Case("za6.d", AArch64::ZAD6)
2539 .Case("za7.d", AArch64::ZAD7)
2540 .Case("za0.s", AArch64::ZAS0)
2541 .Case("za1.s", AArch64::ZAS1)
2542 .Case("za2.s", AArch64::ZAS2)
2543 .Case("za3.s", AArch64::ZAS3)
2544 .Case("za0.h", AArch64::ZAH0)
2545 .Case("za1.h", AArch64::ZAH1)
2546 .Case("za0.b", AArch64::ZAB0)
2547 .Case("za0h.q", AArch64::ZAQ0)
2548 .Case("za1h.q", AArch64::ZAQ1)
2549 .Case("za2h.q", AArch64::ZAQ2)
2550 .Case("za3h.q", AArch64::ZAQ3)
2551 .Case("za4h.q", AArch64::ZAQ4)
2552 .Case("za5h.q", AArch64::ZAQ5)
2553 .Case("za6h.q", AArch64::ZAQ6)
2554 .Case("za7h.q", AArch64::ZAQ7)
2555 .Case("za8h.q", AArch64::ZAQ8)
2556 .Case("za9h.q", AArch64::ZAQ9)
2557 .Case("za10h.q", AArch64::ZAQ10)
2558 .Case("za11h.q", AArch64::ZAQ11)
2559 .Case("za12h.q", AArch64::ZAQ12)
2560 .Case("za13h.q", AArch64::ZAQ13)
2561 .Case("za14h.q", AArch64::ZAQ14)
2562 .Case("za15h.q", AArch64::ZAQ15)
2563 .Case("za0h.d", AArch64::ZAD0)
2564 .Case("za1h.d", AArch64::ZAD1)
2565 .Case("za2h.d", AArch64::ZAD2)
2566 .Case("za3h.d", AArch64::ZAD3)
2567 .Case("za4h.d", AArch64::ZAD4)
2568 .Case("za5h.d", AArch64::ZAD5)
2569 .Case("za6h.d", AArch64::ZAD6)
2570 .Case("za7h.d", AArch64::ZAD7)
2571 .Case("za0h.s", AArch64::ZAS0)
2572 .Case("za1h.s", AArch64::ZAS1)
2573 .Case("za2h.s", AArch64::ZAS2)
2574 .Case("za3h.s", AArch64::ZAS3)
2575 .Case("za0h.h", AArch64::ZAH0)
2576 .Case("za1h.h", AArch64::ZAH1)
2577 .Case("za0h.b", AArch64::ZAB0)
2578 .Case("za0v.q", AArch64::ZAQ0)
2579 .Case("za1v.q", AArch64::ZAQ1)
2580 .Case("za2v.q", AArch64::ZAQ2)
2581 .Case("za3v.q", AArch64::ZAQ3)
2582 .Case("za4v.q", AArch64::ZAQ4)
2583 .Case("za5v.q", AArch64::ZAQ5)
2584 .Case("za6v.q", AArch64::ZAQ6)
2585 .Case("za7v.q", AArch64::ZAQ7)
2586 .Case("za8v.q", AArch64::ZAQ8)
2587 .Case("za9v.q", AArch64::ZAQ9)
2588 .Case("za10v.q", AArch64::ZAQ10)
2589 .Case("za11v.q", AArch64::ZAQ11)
2590 .Case("za12v.q", AArch64::ZAQ12)
2591 .Case("za13v.q", AArch64::ZAQ13)
2592 .Case("za14v.q", AArch64::ZAQ14)
2593 .Case("za15v.q", AArch64::ZAQ15)
2594 .Case("za0v.d", AArch64::ZAD0)
2595 .Case("za1v.d", AArch64::ZAD1)
2596 .Case("za2v.d", AArch64::ZAD2)
2597 .Case("za3v.d", AArch64::ZAD3)
2598 .Case("za4v.d", AArch64::ZAD4)
2599 .Case("za5v.d", AArch64::ZAD5)
2600 .Case("za6v.d", AArch64::ZAD6)
2601 .Case("za7v.d", AArch64::ZAD7)
2602 .Case("za0v.s", AArch64::ZAS0)
2603 .Case("za1v.s", AArch64::ZAS1)
2604 .Case("za2v.s", AArch64::ZAS2)
2605 .Case("za3v.s", AArch64::ZAS3)
2606 .Case("za0v.h", AArch64::ZAH0)
2607 .Case("za1v.h", AArch64::ZAH1)
2608 .Case("za0v.b", AArch64::ZAB0)
2609 .Default(0);
2610}
2611
2612bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2613 SMLoc &EndLoc) {
2614 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
26
Calling 'AArch64AsmParser::tryParseRegister'
32
Returning from 'AArch64AsmParser::tryParseRegister'
33
Returning without writing to 'RegNo'
2615}
2616
2617OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2618 SMLoc &StartLoc,
2619 SMLoc &EndLoc) {
2620 StartLoc = getLoc();
2621 auto Res = tryParseScalarRegister(RegNo);
27
Calling 'AArch64AsmParser::tryParseScalarRegister'
30
Returning from 'AArch64AsmParser::tryParseScalarRegister'
2622 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2623 return Res;
31
Returning without writing to 'RegNo'
2624}
2625
2626// Matches a register name or register alias previously defined by '.req'
2627unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2628 RegKind Kind) {
2629 unsigned RegNum = 0;
2630 if ((RegNum = matchSVEDataVectorRegName(Name)))
2631 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2632
2633 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2634 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2635
2636 if ((RegNum = MatchNeonVectorRegName(Name)))
2637 return Kind == RegKind::NeonVector ? RegNum : 0;
2638
2639 if ((RegNum = matchMatrixRegName(Name)))
2640 return Kind == RegKind::Matrix ? RegNum : 0;
2641
2642 // The parsed register must be of RegKind Scalar
2643 if ((RegNum = MatchRegisterName(Name)))
2644 return Kind == RegKind::Scalar ? RegNum : 0;
2645
2646 if (!RegNum) {
2647 // Handle a few common aliases of registers.
2648 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2649 .Case("fp", AArch64::FP)
2650 .Case("lr", AArch64::LR)
2651 .Case("x31", AArch64::XZR)
2652 .Case("w31", AArch64::WZR)
2653 .Default(0))
2654 return Kind == RegKind::Scalar ? RegNum : 0;
2655
2656 // Check for aliases registered via .req. Canonicalize to lower case.
2657 // That's more consistent since register names are case insensitive, and
2658 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2659 auto Entry = RegisterReqs.find(Name.lower());
2660 if (Entry == RegisterReqs.end())
2661 return 0;
2662
2663 // set RegNum if the match is the right kind of register
2664 if (Kind == Entry->getValue().first)
2665 RegNum = Entry->getValue().second;
2666 }
2667 return RegNum;
2668}
2669
2670/// tryParseScalarRegister - Try to parse a register name. The token must be an
2671/// Identifier when called, and if it is a register name the token is eaten and
2672/// the register is added to the operand list.
2673OperandMatchResultTy
2674AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2675 const AsmToken &Tok = getTok();
2676 if (Tok.isNot(AsmToken::Identifier))
28
Taking true branch
2677 return MatchOperand_NoMatch;
29
Returning without writing to 'Reg'
2678
2679 std::string lowerCase = Tok.getString().lower();
2680 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2681 if (Reg == 0)
2682 return MatchOperand_NoMatch;
2683
2684 RegNum = Reg;
2685 Lex(); // Eat identifier token.
2686 return MatchOperand_Success;
2687}
2688
2689/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2690OperandMatchResultTy
2691AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2692 SMLoc S = getLoc();
2693
2694 if (getTok().isNot(AsmToken::Identifier)) {
2695 Error(S, "Expected cN operand where 0 <= N <= 15");
2696 return MatchOperand_ParseFail;
2697 }
2698
2699 StringRef Tok = getTok().getIdentifier();
2700 if (Tok[0] != 'c' && Tok[0] != 'C') {
2701 Error(S, "Expected cN operand where 0 <= N <= 15");
2702 return MatchOperand_ParseFail;
2703 }
2704
2705 uint32_t CRNum;
2706 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2707 if (BadNum || CRNum > 15) {
2708 Error(S, "Expected cN operand where 0 <= N <= 15");
2709 return MatchOperand_ParseFail;
2710 }
2711
2712 Lex(); // Eat identifier token.
2713 Operands.push_back(
2714 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2715 return MatchOperand_Success;
2716}
2717
/// tryParsePrefetch - Try to parse a prefetch operand.
///
/// Accepts either a named prefetch hint (looked up in the SVE or base PRFM
/// table depending on the template parameter) or an immediate in
/// [0, MaxVal], optionally preceded by '#'.
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Map a prefetch hint name to its encoding, selecting the SVE or base
  // table at compile time.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Reverse mapping: encoding back to its canonical name (if any); used to
  // attach a name to immediate-form operands for nicer printing.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  // SVE prefetch immediates are 4 bits; base prefetch immediates are 5 bits.
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only literal constants are accepted in immediate form.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2786
2787/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2788OperandMatchResultTy
2789AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2790 SMLoc S = getLoc();
2791 const AsmToken &Tok = getTok();
2792 if (Tok.isNot(AsmToken::Identifier)) {
2793 TokError("invalid operand for instruction");
2794 return MatchOperand_ParseFail;
2795 }
2796
2797 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2798 if (!PSB) {
2799 TokError("invalid operand for instruction");
2800 return MatchOperand_ParseFail;
2801 }
2802
2803 Operands.push_back(AArch64Operand::CreatePSBHint(
2804 PSB->Encoding, Tok.getString(), S, getContext()));
2805 Lex(); // Eat identifier token.
2806 return MatchOperand_Success;
2807}
2808
2809/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2810OperandMatchResultTy
2811AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2812 SMLoc S = getLoc();
2813 const AsmToken &Tok = getTok();
2814 if (Tok.isNot(AsmToken::Identifier)) {
2815 TokError("invalid operand for instruction");
2816 return MatchOperand_ParseFail;
2817 }
2818
2819 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2820 if (!BTI) {
2821 TokError("invalid operand for instruction");
2822 return MatchOperand_ParseFail;
2823 }
2824
2825 Operands.push_back(AArch64Operand::CreateBTIHint(
2826 BTI->Encoding, Tok.getString(), S, getContext()));
2827 Lex(); // Eat identifier token.
2828 return MatchOperand_Success;
2829}
2830
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
///
/// Accepts a (possibly '@page'/'@gotpage'-qualified) symbol reference with
/// optional addend, or an immediate; the result is pushed as an immediate
/// operand.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify the reference to decide which relocation variant it implies.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin @gotpage/@tlvppage references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2882
2883/// tryParseAdrLabel - Parse and validate a source label for the ADR
2884/// instruction.
2885OperandMatchResultTy
2886AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2887 SMLoc S = getLoc();
2888 const MCExpr *Expr = nullptr;
2889
2890 // Leave anything with a bracket to the default for SVE
2891 if (getTok().is(AsmToken::LBrac))
2892 return MatchOperand_NoMatch;
2893
2894 if (getTok().is(AsmToken::Hash))
2895 Lex(); // Eat hash token.
2896
2897 if (parseSymbolicImmVal(Expr))
2898 return MatchOperand_ParseFail;
2899
2900 AArch64MCExpr::VariantKind ELFRefKind;
2901 MCSymbolRefExpr::VariantKind DarwinRefKind;
2902 int64_t Addend;
2903 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2904 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2905 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2906 // No modifier was specified at all; this is the syntax for an ELF basic
2907 // ADR relocation (unfortunately).
2908 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2909 } else {
2910 Error(S, "unexpected adr label");
2911 return MatchOperand_ParseFail;
2912 }
2913 }
2914
2915 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2916 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2917 return MatchOperand_Success;
2918}
2919
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts either a hexadecimal byte (0x00-0xff, the raw 8-bit FP-immediate
/// encoding) or a literal real/integer value, each optionally preceded by
/// '#' and '-'.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this simply isn't an FP immediate; with one,
    // it is a malformed operand.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The hex form is the encoded 8-bit value; it cannot be negated.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    // Decode the 8-bit encoding back to the FP value it represents.
    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Represent +0.0 as the two literal tokens "#0" ".0".
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      // The second argument records whether the conversion was exact.
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Lex(); // Eat the token.

  return MatchOperand_Success;
}
2974
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (getTok().isNot(AsmToken::Comma)) {
    // No shift suffix: emit a plain immediate operand.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl")) {
    Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    // 'lsl #0' is a no-op: emit a plain immediate rather than a shifted one.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return MatchOperand_Success;
}
3035
/// parseCondCodeString - Parse a Condition Code string.
///
/// Returns AArch64CC::Invalid if the string is not a recognized condition
/// code (or SVE condition alias, when SVE is enabled).
AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
  // Standard AArch64 condition-code mnemonics (case insensitive).
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("eq", AArch64CC::EQ)
                    .Case("ne", AArch64CC::NE)
                    .Case("cs", AArch64CC::HS)
                    .Case("hs", AArch64CC::HS)
                    .Case("cc", AArch64CC::LO)
                    .Case("lo", AArch64CC::LO)
                    .Case("mi", AArch64CC::MI)
                    .Case("pl", AArch64CC::PL)
                    .Case("vs", AArch64CC::VS)
                    .Case("vc", AArch64CC::VC)
                    .Case("hi", AArch64CC::HI)
                    .Case("ls", AArch64CC::LS)
                    .Case("ge", AArch64CC::GE)
                    .Case("lt", AArch64CC::LT)
                    .Case("gt", AArch64CC::GT)
                    .Case("le", AArch64CC::LE)
                    .Case("al", AArch64CC::AL)
                    .Case("nv", AArch64CC::NV)
                    .Default(AArch64CC::Invalid);

  // SVE defines alternative names for the same encodings (e.g. "none" for
  // EQ); accept them only when the SVE feature is enabled.
  if (CC == AArch64CC::Invalid &&
      getSTI().getFeatureBits()[AArch64::FeatureSVE])
    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("none",  AArch64CC::EQ)
                    .Case("any",   AArch64CC::NE)
                    .Case("nlast", AArch64CC::HS)
                    .Case("last",  AArch64CC::LO)
                    .Case("first", AArch64CC::MI)
                    .Case("nfrst", AArch64CC::PL)
                    .Case("pmore", AArch64CC::HI)
                    .Case("plast", AArch64CC::LS)
                    .Case("tcont", AArch64CC::GE)
                    .Case("tstop", AArch64CC::LT)
                    .Default(AArch64CC::Invalid);

  return CC;
}
3076
/// parseCondCode - Parse a Condition Code operand.
///
/// Consumes the condition-code identifier and pushes a CondCode operand.
/// When invertCondCode is set the parsed code is inverted (AL/NV, which
/// have no inverse, are rejected). Returns true on error.
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  StringRef Cond = Tok.getString();
  AArch64CC::CondCode CC = parseCondCodeString(Cond);
  if (CC == AArch64CC::Invalid)
    return TokError("invalid condition code");
  Lex(); // Eat identifier token.

  if (invertCondCode) {
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
      return TokError("condition codes AL and NV are invalid for this instruction");
    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
  }

  Operands.push_back(
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
  return false;
}
3100
3101OperandMatchResultTy
3102AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3103 const AsmToken &Tok = getTok();
3104 SMLoc S = getLoc();
3105
3106 if (Tok.isNot(AsmToken::Identifier)) {
3107 TokError("invalid operand for instruction");
3108 return MatchOperand_ParseFail;
3109 }
3110
3111 unsigned PStateImm = -1;
3112 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3113 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3114 PStateImm = SVCR->Encoding;
3115
3116 Operands.push_back(
3117 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3118 Lex(); // Eat identifier token.
3119 return MatchOperand_Success;
3120}
3121
/// Try to parse an SME matrix register operand: either the whole 'za'
/// array, or a tile / tile-slice name such as 'za0h.s' (matched via
/// matchRegisterNameAlias).
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  if (Name.equals_insensitive("za")) {
    Lex(); // eat "za"
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
    return MatchOperand_Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return MatchOperand_NoMatch;

  // Matrix register names always carry an element-width suffix after a dot
  // (e.g. "za0h.s" -> Head "za0h", Tail ".s").
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();

  // The last character before the dot selects row ('h'), column ('v'), or
  // whole-tile addressing.
  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes) {
    TokError("Expected the register to be followed by element width suffix");
    return MatchOperand_ParseFail;
  }
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }
  return MatchOperand_Success;
}
3181
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Shift operations (lsl/lsr/asr/ror/msl) require an immediate amount;
/// extend operations (uxtb..sxtx) default to an implicit #0.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Lex(); // Eat the shift/extend mnemonic.

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The shift amount must fold to a literal constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
3253
// Maps architecture-extension names to the subtarget feature bits they
// imply. Scanned by setRequiredFeatureString (below) to name the
// extension(s) a rejected mnemonic requires.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64", {AArch64::FeatureSMEF64}},
    {"sme-i64", {AArch64::FeatureSMEI64}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
3297
3298static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3299 if (FBS[AArch64::HasV8_1aOps])
3300 Str += "ARMv8.1a";
3301 else if (FBS[AArch64::HasV8_2aOps])
3302 Str += "ARMv8.2a";
3303 else if (FBS[AArch64::HasV8_3aOps])
3304 Str += "ARMv8.3a";
3305 else if (FBS[AArch64::HasV8_4aOps])
3306 Str += "ARMv8.4a";
3307 else if (FBS[AArch64::HasV8_5aOps])
3308 Str += "ARMv8.5a";
3309 else if (FBS[AArch64::HasV8_6aOps])
3310 Str += "ARMv8.6a";
3311 else if (FBS[AArch64::HasV8_7aOps])
3312 Str += "ARMv8.7a";
3313 else {
3314 SmallVector<std::string, 2> ExtMatches;
3315 for (const auto& Ext : ExtensionMap) {
3316 // Use & in case multiple features are enabled
3317 if ((FBS & Ext.Features) != FeatureBitset())
3318 ExtMatches.push_back(Ext.Name);
3319 }
3320 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3321 }
3322}
3323
3324void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3325 SMLoc S) {
3326 const uint16_t Op2 = Encoding & 7;
3327 const uint16_t Cm = (Encoding & 0x78) >> 3;
3328 const uint16_t Cn = (Encoding & 0x780) >> 7;
3329 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3330
3331 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3332
3333 Operands.push_back(
3334 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3335 Operands.push_back(
3336 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3337 Operands.push_back(
3338 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3339 Expr = MCConstantExpr::create(Op2, getContext());
3340 Operands.push_back(
3341 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3342}
3343
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Returns true on error. Emits feature diagnostics when the named
/// operation exists but is not available under the current subtarget.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // All of these aliases lower to a SYS instruction.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    // The prediction-restriction mnemonic determines op2 of the encoding.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Lex(); // Eat operand.

  // Operations whose name contains "all" act on everything and therefore
  // take no register; all others require one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3439
/// Try to parse a barrier operand for DSB/DMB/ISB/TSB: either an immediate
/// in [0, 15] (optionally '#'-prefixed) or a named barrier option.
/// For "dsb", out-of-range immediates and unknown names return NoMatch so
/// the nXS variant parser can have a go.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back for the nXS path.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical name for the encoding when one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
3512
/// Try to parse the nXS-qualified barrier operand of the v8.7-A DSB
/// variant: an immediate restricted to {16, 20, 24, 28} or a named nXS
/// barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  // Only "dsb" has an nXS variant.
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // The range check above guarantees the lookup succeeds.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
3566
/// tryParseSysReg - Parse a system-register operand for MRS/MSR. The same
/// identifier may name a readable and/or writable system register, a generic
/// "op0:op1:cn:cm:op2" register, and/or a PSTATE field; all encodings are
/// recorded on one operand and disambiguated later by the matcher.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // SVCR names (SME) are handled by a different operand parser.
  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
    return MatchOperand_NoMatch;

  // -1 marks a direction (read/write) as invalid for this register.
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Not a known named register: try the generic sN_N_cN_cN_N form.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Lex(); // Eat identifier

  return MatchOperand_Success;
}
3597
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true on failure (note: inverted sense vs. OperandMatchResultTy).
/// On success pushes the register operand, an optional kind-qualifier token
/// (e.g. ".4s"), and an optional vector index ("[n]").
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Resolve the element width from the ".<kind>" suffix (or the default
  // for a bare register name).
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));

  // A trailing "[n]" index is optional; only a hard parse failure is an
  // error.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3628
3629OperandMatchResultTy
3630AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3631 SMLoc SIdx = getLoc();
3632 if (parseOptionalToken(AsmToken::LBrac)) {
3633 const MCExpr *ImmVal;
3634 if (getParser().parseExpression(ImmVal))
3635 return MatchOperand_NoMatch;
3636 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3637 if (!MCE) {
3638 TokError("immediate value expected for vector index");
3639 return MatchOperand_ParseFail;;
3640 }
3641
3642 SMLoc E = getLoc();
3643
3644 if (parseToken(AsmToken::RBrac, "']' expected"))
3645 return MatchOperand_ParseFail;;
3646
3647 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3648 E, getContext()));
3649 return MatchOperand_Success;
3650 }
3651
3652 return MatchOperand_NoMatch;
3653}
3654
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
OperandMatchResultTy
AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                         RegKind MatchKind) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Kind keeps the leading '.' (e.g. ".4s"); validate it against the
      // requested register kind.
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind, MatchKind)) {
        TokError("invalid vector kind qualifier");
        return MatchOperand_ParseFail;
      }
    }
    Lex(); // Eat the register token.

    Reg = RegNum;
    return MatchOperand_Success;
  }

  // Not a register of the requested kind; nothing has been consumed.
  return MatchOperand_NoMatch;
}
3689
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by an index ("[n]") or a "/m" (merging) or "/z"
/// (zeroing) qualifier.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // Indexed predicate, there's no comma so try parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3746
3747/// parseRegister - Parse a register operand.
3748bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3749 // Try for a Neon vector register.
3750 if (!tryParseNeonVectorRegister(Operands))
3751 return false;
3752
3753 // Otherwise try for a scalar register.
3754 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3755 return false;
3756
3757 return true;
3758}
3759
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ELF relocation specifier of the form ":<spec>:" (e.g.
/// ":lo12:sym"). Returns true on error. On success ImmVal holds the parsed
/// expression, wrapped in an AArch64MCExpr when a modifier was present.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Relocation specifiers are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Attach the relocation modifier to the parsed expression, if any.
  // (RefKind is only read when HasELFModifier is set, so it is initialized
  // on every path that uses it.)
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3837
/// tryParseMatrixTileList - Parse an SME matrix tile list operand of the
/// form "{}" (empty), "{za}" (whole array), or "{<tile>.<suffix>, ...}".
/// The tiles are reduced to a ZAD0-relative bitmask on the resulting
/// operand.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Parse a single "<tile>.<suffix>" entry; consumes the token only on
  // success.
  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const Optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  // Remember the '{' so it can be pushed back on NoMatch (see UnLex below).
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    // 0xFF selects all eight ZAD tiles.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    // Restore the '{' so another list-operand parser may try.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;
  unsigned Count = 1;

  // DRegs accumulates the ZAD-view aliases of every tile seen.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Out-of-order and duplicate tiles are accepted with a warning only.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
    ++Count;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Fold the collected ZAD registers into a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
3948
/// tryParseVectorList - Parse a "{v0.8b, v1.8b}" or "{v0.8b-v3.8b}" style
/// vector register list for the given register kind (Neon or SVE). When
/// ExpectMatch is true, a non-register inside the braces is a hard error
/// rather than a NoMatch.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "za"-prefixed identifiers are deliberately not errors so SME tile
    // lists can be tried next.
    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{vN.t-vM.t}".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Register numbers wrap modulo 32 (e.g. {v31.8b-v1.8b} is valid).
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: registers must be consecutive (mod 32).
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
4070
4071/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4072bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4073 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4074 if (ParseRes != MatchOperand_Success)
4075 return true;
4076
4077 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4078}
4079
/// tryParseGPR64sp0Operand - Parse a "Xn|SP" register optionally followed by
/// ", #0" (the only index value these instructions permit).
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No comma: bare register form.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // The '#' before the 0 is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  // Only a literal constant 0 is accepted after the comma.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
4113
/// tryParseGPROperand - Parse a scalar GPR operand, optionally followed by a
/// ", <shift|extend> #amount" suffix when ParseShiftExtend is set. EqTy
/// records how the register participates in tied-register equality checks.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into the register operand itself rather
  // than pushing it separately.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
4148
4149bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4150 MCAsmParser &Parser = getParser();
4151
4152 // Some SVE instructions have a decoration after the immediate, i.e.
4153 // "mul vl". We parse them here and add tokens, which must be present in the
4154 // asm string in the tablegen instruction.
4155 bool NextIsVL =
4156 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4157 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4158 if (!getTok().getString().equals_insensitive("mul") ||
4159 !(NextIsVL || NextIsHash))
4160 return true;
4161
4162 Operands.push_back(
4163 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4164 Lex(); // Eat the "mul"
4165
4166 if (NextIsVL) {
4167 Operands.push_back(
4168 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4169 Lex(); // Eat the "vl"
4170 return false;
4171 }
4172
4173 if (NextIsHash) {
4174 Lex(); // Eat the #
4175 SMLoc S = getLoc();
4176
4177 // Parse immediate operand.
4178 const MCExpr *ImmVal;
4179 if (!Parser.parseExpression(ImmVal))
4180 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4181 Operands.push_back(AArch64Operand::CreateImm(
4182 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4183 getContext()));
4184 return MatchOperand_Success;
4185 }
4186 }
4187
4188 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4189}
4190
4191bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4192 auto Tok = getTok();
4193 if (Tok.isNot(AsmToken::Identifier))
4194 return true;
4195
4196 auto Keyword = Tok.getString();
4197 Keyword = StringSwitch<StringRef>(Keyword.lower())
4198 .Case("sm", "sm")
4199 .Case("za", "za")
4200 .Default(Keyword);
4201 Operands.push_back(
4202 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4203
4204 Lex();
4205 return false;
4206}
4207
/// parseOperand - Parse a arm instruction operand. For now this parses the
/// operand regardless of the mnemonic. Returns true on failure.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly modifier-prefixed) expression
    // and becomes an immediate operand.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // Try a Neon vector list first; otherwise '{' starts e.g. an SVE/SME
    // list handled by the recursive call.
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is an "smstart" or "smstop" instruction, parse its special
    // keyword operand as an identifier.
    if (Mnemonic == "smstart" || Mnemonic == "smstop")
      return parseKeywordOperand(Operands);

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;
      // NOTE(review): relies on the implicit enum->bool conversion
      // (Success == 0 -> false, ParseFail != 0 -> true); works but fragile.

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emitted as two raw tokens to match the tablegen asm string.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr r0, =val" pseudo: turn into a movz when the value fits, otherwise
    // place the value in the constant pool and load its address.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize to a 16-bit payload plus an LSL shift in steps of 16.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4396
4397bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4398 const MCExpr *Expr = nullptr;
4399 SMLoc L = getLoc();
4400 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4401 return true;
4402 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4403 if (check(!Value, L, "expected constant expression"))
4404 return true;
4405 Out = Value->getValue();
4406 return false;
4407}
4408
4409bool AArch64AsmParser::parseComma() {
4410 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4411 return true;
4412 // Eat the comma
4413 Lex();
4414 return false;
4415}
4416
4417bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4418 unsigned First, unsigned Last) {
4419 unsigned Reg;
24
'Reg' declared without an initial value
4420 SMLoc Start, End;
4421 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
25
Calling 'AArch64AsmParser::ParseRegister'
34
Returning from 'AArch64AsmParser::ParseRegister'
35
Assuming the condition is false
36
Taking false branch
4422 return true;
4423
4424 // Special handling for FP and LR; they aren't linearly after x28 in
4425 // the registers enum.
4426 unsigned RangeEnd = Last;
4427 if (Base
36.1
'Base' is equal to X0
== AArch64::X0) {
37
Taking true branch
4428 if (Last
37.1
'Last' is equal to FP
== AArch64::FP) {
38
Taking true branch
4429 RangeEnd = AArch64::X28;
4430 if (Reg == AArch64::FP) {
39
The left operand of '==' is a garbage value
4431 Out = 29;
4432 return false;
4433 }
4434 }
4435 if (Last == AArch64::LR) {
4436 RangeEnd = AArch64::X28;
4437 if (Reg == AArch64::FP) {
4438 Out = 29;
4439 return false;
4440 } else if (Reg == AArch64::LR) {
4441 Out = 30;
4442 return false;
4443 }
4444 }
4445 }
4446
4447 if (check(Reg < First || Reg > RangeEnd, Start,
4448 Twine("expected register in range ") +
4449 AArch64InstPrinter::getRegisterName(First) + " to " +
4450 AArch64InstPrinter::getRegisterName(Last)))
4451 return true;
4452 Out = Reg - Base;
4453 return false;
4454}
4455
4456bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4457 const MCParsedAsmOperand &Op2) const {
4458 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4459 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4460 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4461 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4462 return MCTargetAsmParser::regsEqual(Op1, Op2);
4463
4464 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&(static_cast<void> (0))
4465 "Testing equality of non-scalar registers not supported")(static_cast<void> (0));
4466
4467 // Check if a registers match their sub/super register classes.
4468 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4469 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4470 if (AOp1.getRegEqualityTy() == EqualsSubReg)
4471 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4472 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4473 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4474 if (AOp2.getRegEqualityTy() == EqualsSubReg)
4475 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4476
4477 return false;
4478}
4479
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Builds up \p Operands with one token operand per '.'-separated mnemonic
/// component, then parses the comma-separated operand list. Returns true on
/// any parse error (a diagnostic has already been emitted), false on success.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize un-dotted conditional branch aliases ("beq") to the dotted
  // architectural spelling ("b.eq") so only one form needs handling below.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Location of the condition-code suffix inside the original mnemonic
    // text, so diagnostics point at the suffix rather than the whole name.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1; // 1-based operand position, used for cond-code placement.
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //  '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4619
4620static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4621 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(static_cast<void> (0));
4622 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4623 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4624 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4625 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4626 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4627 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4628}
4629
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Post-match validation of an assembled MCInst. Checks three families of
// constraints: (1) rules for instructions following a movprfx prefix,
// (2) unpredictable register combinations for indexed/paired/exclusive
// loads and stores, and (3) symbolic-immediate restrictions on ADD/SUB
// immediates. Returns true (with a diagnostic) if the instruction is
// invalid; Loc holds per-operand source locations for diagnostics.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erronously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed intructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                   " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                   " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    // Tied operands (TIED_TO != -1) are exempt: they are the destructive
    // use the prefix exists for.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                     " movprfx and destination also used as non-destructive"
                     " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instructions general predicate.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Pre/post-indexed LDP: operand 0 is the writeback base def, so the
    // destination pair and base start at operand 1.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    LLVM_FALLTHROUGH; // Also subject to the Rt == Rt2 check below.
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-indexed LDP: no writeback operand, pair starts at operand 0.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // FP/SIMD pre/post-indexed LDP: base aliasing cannot occur (base is a
    // GPR, destinations are FP regs), so only the Rt == Rt2 check applies.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    // Pre/post-indexed STP: writeback base must not overlap either source.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Pre/post-indexed single-register LDR: writeback base must not overlap
    // the destination.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Pre/post-indexed single-register STR: writeback base must not overlap
    // the stored register.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Store-exclusive: status register must not overlap the data register,
    // nor the base register (unless the base is SP).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    // Store-exclusive pair: status register must not overlap either data
    // register, nor the base register (unless the base is SP).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    // Pointer-authenticated load with writeback: base and destination must
    // differ.
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
          "unpredictable LDRA instruction, writeback base"
          " is also a destination");
    break;
  }
  }


  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
4917
4918static std::string AArch64MnemonicSpellCheck(StringRef S,
4919 const FeatureBitset &FBS,
4920 unsigned VariantID = 0);
4921
4922bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4923 uint64_t ErrorInfo,
4924 OperandVector &Operands) {
4925 switch (ErrCode) {
4926 case Match_InvalidTiedOperand: {
4927 RegConstraintEqualityTy EqTy =
4928 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4929 .getRegEqualityTy();
4930 switch (EqTy) {
4931 case RegConstraintEqualityTy::EqualsSubReg:
4932 return Error(Loc, "operand must be 64-bit form of destination register");
4933 case RegConstraintEqualityTy::EqualsSuperReg:
4934 return Error(Loc, "operand must be 32-bit form of destination register");
4935 case RegConstraintEqualityTy::EqualsReg:
4936 return Error(Loc, "operand must match destination register");
4937 }
4938 llvm_unreachable("Unknown RegConstraintEqualityTy")__builtin_unreachable();
4939 }
4940 case Match_MissingFeature:
4941 return Error(Loc,
4942 "instruction requires a CPU feature not currently enabled");
4943 case Match_InvalidOperand:
4944 return Error(Loc, "invalid operand for instruction");
4945 case Match_InvalidSuffix:
4946 return Error(Loc, "invalid type suffix for instruction");
4947 case Match_InvalidCondCode:
4948 return Error(Loc, "expected AArch64 condition code");
4949 case Match_AddSubRegExtendSmall:
4950 return Error(Loc,
4951 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4952 case Match_AddSubRegExtendLarge:
4953 return Error(Loc,
4954 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4955 case Match_AddSubSecondSource:
4956 return Error(Loc,
4957 "expected compatible register, symbol or integer in range [0, 4095]");
4958 case Match_LogicalSecondSource:
4959 return Error(Loc, "expected compatible register or logical immediate");
4960 case Match_InvalidMovImm32Shift:
4961 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4962 case Match_InvalidMovImm64Shift:
4963 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4964 case Match_AddSubRegShift32:
4965 return Error(Loc,
4966 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4967 case Match_AddSubRegShift64:
4968 return Error(Loc,
4969 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4970 case Match_InvalidFPImm:
4971 return Error(Loc,
4972 "expected compatible register or floating-point constant");
4973 case Match_InvalidMemoryIndexedSImm6:
4974 return Error(Loc, "index must be an integer in range [-32, 31].");
4975 case Match_InvalidMemoryIndexedSImm5:
4976 return Error(Loc, "index must be an integer in range [-16, 15].");
4977 case Match_InvalidMemoryIndexed1SImm4:
4978 return Error(Loc, "index must be an integer in range [-8, 7].");
4979 case Match_InvalidMemoryIndexed2SImm4:
4980 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4981 case Match_InvalidMemoryIndexed3SImm4:
4982 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4983 case Match_InvalidMemoryIndexed4SImm4:
4984 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4985 case Match_InvalidMemoryIndexed16SImm4:
4986 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4987 case Match_InvalidMemoryIndexed32SImm4:
4988 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4989 case Match_InvalidMemoryIndexed1SImm6:
4990 return Error(Loc, "index must be an integer in range [-32, 31].");
4991 case Match_InvalidMemoryIndexedSImm8:
4992 return Error(Loc, "index must be an integer in range [-128, 127].");
4993 case Match_InvalidMemoryIndexedSImm9:
4994 return Error(Loc, "index must be an integer in range [-256, 255].");
4995 case Match_InvalidMemoryIndexed16SImm9:
4996 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4997 case Match_InvalidMemoryIndexed8SImm10:
4998 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4999 case Match_InvalidMemoryIndexed4SImm7:
5000 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5001 case Match_InvalidMemoryIndexed8SImm7:
5002 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5003 case Match_InvalidMemoryIndexed16SImm7:
5004 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5005 case Match_InvalidMemoryIndexed8UImm5:
5006 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5007 case Match_InvalidMemoryIndexed4UImm5:
5008 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5009 case Match_InvalidMemoryIndexed2UImm5:
5010 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5011 case Match_InvalidMemoryIndexed8UImm6:
5012 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5013 case Match_InvalidMemoryIndexed16UImm6:
5014 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5015 case Match_InvalidMemoryIndexed4UImm6:
5016 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5017 case Match_InvalidMemoryIndexed2UImm6:
5018 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5019 case Match_InvalidMemoryIndexed1UImm6:
5020 return Error(Loc, "index must be in range [0, 63].");
5021 case Match_InvalidMemoryWExtend8:
5022 return Error(Loc,
5023 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5024 case Match_InvalidMemoryWExtend16:
5025 return Error(Loc,
5026 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5027 case Match_InvalidMemoryWExtend32:
5028 return Error(Loc,
5029 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5030 case Match_InvalidMemoryWExtend64:
5031 return Error(Loc,
5032 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5033 case Match_InvalidMemoryWExtend128:
5034 return Error(Loc,
5035 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5036 case Match_InvalidMemoryXExtend8:
5037 return Error(Loc,
5038 "expected 'lsl' or 'sxtx' with optional shift of #0");
5039 case Match_InvalidMemoryXExtend16:
5040 return Error(Loc,
5041 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5042 case Match_InvalidMemoryXExtend32:
5043 return Error(Loc,
5044 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5045 case Match_InvalidMemoryXExtend64:
5046 return Error(Loc,
5047 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5048 case Match_InvalidMemoryXExtend128:
5049 return Error(Loc,
5050 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5051 case Match_InvalidMemoryIndexed1:
5052 return Error(Loc, "index must be an integer in range [0, 4095].");
5053 case Match_InvalidMemoryIndexed2:
5054 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5055 case Match_InvalidMemoryIndexed4:
5056 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5057 case Match_InvalidMemoryIndexed8:
5058 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5059 case Match_InvalidMemoryIndexed16:
5060 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5061 case Match_InvalidImm0_1:
5062 return Error(Loc, "immediate must be an integer in range [0, 1].");
5063 case Match_InvalidImm0_3:
5064 return Error(Loc, "immediate must be an integer in range [0, 3].");
5065 case Match_InvalidImm0_7:
5066 return Error(Loc, "immediate must be an integer in range [0, 7].");
5067 case Match_InvalidImm0_15:
5068 return Error(Loc, "immediate must be an integer in range [0, 15].");
5069 case Match_InvalidImm0_31:
5070 return Error(Loc, "immediate must be an integer in range [0, 31].");
5071 case Match_InvalidImm0_63:
5072 return Error(Loc, "immediate must be an integer in range [0, 63].");
5073 case Match_InvalidImm0_127:
5074 return Error(Loc, "immediate must be an integer in range [0, 127].");
5075 case Match_InvalidImm0_255:
5076 return Error(Loc, "immediate must be an integer in range [0, 255].");
5077 case Match_InvalidImm0_65535:
5078 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5079 case Match_InvalidImm1_8:
5080 return Error(Loc, "immediate must be an integer in range [1, 8].");
5081 case Match_InvalidImm1_16:
5082 return Error(Loc, "immediate must be an integer in range [1, 16].");
5083 case Match_InvalidImm1_32:
5084 return Error(Loc, "immediate must be an integer in range [1, 32].");
5085 case Match_InvalidImm1_64:
5086 return Error(Loc, "immediate must be an integer in range [1, 64].");
5087 case Match_InvalidSVEAddSubImm8:
5088 return Error(Loc, "immediate must be an integer in range [0, 255]"
5089 " with a shift amount of 0");
5090 case Match_InvalidSVEAddSubImm16:
5091 case Match_InvalidSVEAddSubImm32:
5092 case Match_InvalidSVEAddSubImm64:
5093 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5094 "multiple of 256 in range [256, 65280]");
5095 case Match_InvalidSVECpyImm8:
5096 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5097 " with a shift amount of 0");
5098 case Match_InvalidSVECpyImm16:
5099 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5100 "multiple of 256 in range [-32768, 65280]");
5101 case Match_InvalidSVECpyImm32:
5102 case Match_InvalidSVECpyImm64:
5103 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5104 "multiple of 256 in range [-32768, 32512]");
5105 case Match_InvalidIndexRange0_0:
5106 return Error(Loc, "expected lane specifier '[0]'");
5107 case Match_InvalidIndexRange1_1:
5108 return Error(Loc, "expected lane specifier '[1]'");
5109 case Match_InvalidIndexRange0_15:
5110 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5111 case Match_InvalidIndexRange0_7:
5112 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5113 case Match_InvalidIndexRange0_3:
5114 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5115 case Match_InvalidIndexRange0_1:
5116 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5117 case Match_InvalidSVEIndexRange0_63:
5118 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5119 case Match_InvalidSVEIndexRange0_31:
5120 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5121 case Match_InvalidSVEIndexRange0_15:
5122 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5123 case Match_InvalidSVEIndexRange0_7:
5124 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5125 case Match_InvalidSVEIndexRange0_3:
5126 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5127 case Match_InvalidLabel:
5128 return Error(Loc, "expected label or encodable integer pc offset");
5129 case Match_MRS:
5130 return Error(Loc, "expected readable system register");
5131 case Match_MSR:
5132 case Match_InvalidSVCR:
5133 return Error(Loc, "expected writable system register or pstate");
5134 case Match_InvalidComplexRotationEven:
5135 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5136 case Match_InvalidComplexRotationOdd:
5137 return Error(Loc, "complex rotation must be 90 or 270.");
5138 case Match_MnemonicFail: {
5139 std::string Suggestion = AArch64MnemonicSpellCheck(
5140 ((AArch64Operand &)*Operands[0]).getToken(),
5141 ComputeAvailableFeatures(STI->getFeatureBits()));
5142 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5143 }
5144 case Match_InvalidGPR64shifted8:
5145 return Error(Loc, "register must be x0..x30 or xzr, without shift");
5146 case Match_InvalidGPR64shifted16:
5147 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5148 case Match_InvalidGPR64shifted32:
5149 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5150 case Match_InvalidGPR64shifted64:
5151 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5152 case Match_InvalidGPR64shifted128:
5153 return Error(
5154 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5155 case Match_InvalidGPR64NoXZRshifted8:
5156 return Error(Loc, "register must be x0..x30 without shift");
5157 case Match_InvalidGPR64NoXZRshifted16:
5158 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5159 case Match_InvalidGPR64NoXZRshifted32:
5160 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5161 case Match_InvalidGPR64NoXZRshifted64:
5162 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5163 case Match_InvalidGPR64NoXZRshifted128:
5164 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
5165 case Match_InvalidZPR32UXTW8:
5166 case Match_InvalidZPR32SXTW8:
5167 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5168 case Match_InvalidZPR32UXTW16:
5169 case Match_InvalidZPR32SXTW16:
5170 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5171 case Match_InvalidZPR32UXTW32:
5172 case Match_InvalidZPR32SXTW32:
5173 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5174 case Match_InvalidZPR32UXTW64:
5175 case Match_InvalidZPR32SXTW64:
5176 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5177 case Match_InvalidZPR64UXTW8:
5178 case Match_InvalidZPR64SXTW8:
5179 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5180 case Match_InvalidZPR64UXTW16:
5181 case Match_InvalidZPR64SXTW16:
5182 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5183 case Match_InvalidZPR64UXTW32:
5184 case Match_InvalidZPR64SXTW32:
5185 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5186 case Match_InvalidZPR64UXTW64:
5187 case Match_InvalidZPR64SXTW64:
5188 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5189 case Match_InvalidZPR32LSL8:
5190 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5191 case Match_InvalidZPR32LSL16:
5192 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5193 case Match_InvalidZPR32LSL32:
5194 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5195 case Match_InvalidZPR32LSL64:
5196 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5197 case Match_InvalidZPR64LSL8:
5198 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5199 case Match_InvalidZPR64LSL16:
5200 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5201 case Match_InvalidZPR64LSL32:
5202 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5203 case Match_InvalidZPR64LSL64:
5204 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5205 case Match_InvalidZPR0:
5206 return Error(Loc, "expected register without element width suffix");
5207 case Match_InvalidZPR8:
5208 case Match_InvalidZPR16:
5209 case Match_InvalidZPR32:
5210 case Match_InvalidZPR64:
5211 case Match_InvalidZPR128:
5212 return Error(Loc, "invalid element width");
5213 case Match_InvalidZPR_3b8:
5214 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
5215 case Match_InvalidZPR_3b16:
5216 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
5217 case Match_InvalidZPR_3b32:
5218 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
5219 case Match_InvalidZPR_4b16:
5220 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
5221 case Match_InvalidZPR_4b32:
5222 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
5223 case Match_InvalidZPR_4b64:
5224 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
5225 case Match_InvalidSVEPattern:
5226 return Error(Loc, "invalid predicate pattern");
5227 case Match_InvalidSVEPredicateAnyReg:
5228 case Match_InvalidSVEPredicateBReg:
5229 case Match_InvalidSVEPredicateHReg:
5230 case Match_InvalidSVEPredicateSReg:
5231 case Match_InvalidSVEPredicateDReg:
5232 return Error(Loc, "invalid predicate register.");
5233 case Match_InvalidSVEPredicate3bAnyReg:
5234 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
5235 case Match_InvalidSVEExactFPImmOperandHalfOne:
5236 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
5237 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5238 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
5239 case Match_InvalidSVEExactFPImmOperandZeroOne:
5240 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
5241 case Match_InvalidMatrixTileVectorH8:
5242 case Match_InvalidMatrixTileVectorV8:
5243 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
5244 case Match_InvalidMatrixTileVectorH16:
5245 case Match_InvalidMatrixTileVectorV16:
5246 return Error(Loc,
5247 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
5248 case Match_InvalidMatrixTileVectorH32:
5249 case Match_InvalidMatrixTileVectorV32:
5250 return Error(Loc,
5251 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
5252 case Match_InvalidMatrixTileVectorH64:
5253 case Match_InvalidMatrixTileVectorV64:
5254 return Error(Loc,
5255 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
5256 case Match_InvalidMatrixTileVectorH128:
5257 case Match_InvalidMatrixTileVectorV128:
5258 return Error(Loc,
5259 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
5260 case Match_InvalidMatrixTile32:
5261 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
5262 case Match_InvalidMatrixTile64:
5263 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
5264 case Match_InvalidMatrix:
5265 return Error(Loc, "invalid matrix operand, expected za");
5266 case Match_InvalidMatrixIndexGPR32_12_15:
5267 return Error(Loc, "operand must be a register in range [w12, w15]");
5268 default:
5269 llvm_unreachable("unexpected error code!")__builtin_unreachable();
5270 }
5271}
5272
5273static const char *getSubtargetFeatureName(uint64_t Val);
5274
5275bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
5276 OperandVector &Operands,
5277 MCStreamer &Out,
5278 uint64_t &ErrorInfo,
5279 bool MatchingInlineAsm) {
5280 assert(!Operands.empty() && "Unexpect empty operand list!")(static_cast<void> (0));
5281 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
5282 assert(Op.isToken() && "Leading operand should always be a mnemonic!")(static_cast<void> (0));
5283
5284 StringRef Tok = Op.getToken();
5285 unsigned NumOperands = Operands.size();
5286
5287 if (NumOperands == 4 && Tok == "lsl") {
5288 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5289 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5290 if (Op2.isScalarReg() && Op3.isImm()) {
5291 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5292 if (Op3CE) {
5293 uint64_t Op3Val = Op3CE->getValue();
5294 uint64_t NewOp3Val = 0;
5295 uint64_t NewOp4Val = 0;
5296 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
5297 Op2.getReg())) {
5298 NewOp3Val = (32 - Op3Val) & 0x1f;
5299 NewOp4Val = 31 - Op3Val;
5300 } else {
5301 NewOp3Val = (64 - Op3Val) & 0x3f;
5302 NewOp4Val = 63 - Op3Val;
5303 }
5304
5305 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
5306 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
5307
5308 Operands[0] =
5309 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
5310 Operands.push_back(AArch64Operand::CreateImm(
5311 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
5312 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
5313 Op3.getEndLoc(), getContext());
5314 }
5315 }
5316 } else if (NumOperands == 4 && Tok == "bfc") {
5317 // FIXME: Horrible hack to handle BFC->BFM alias.
5318 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5319 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
5320 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
5321
5322 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
5323 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
5324 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
5325
5326 if (LSBCE && WidthCE) {
5327 uint64_t LSB = LSBCE->getValue();
5328 uint64_t Width = WidthCE->getValue();
5329
5330 uint64_t RegWidth = 0;
5331 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5332 Op1.getReg()))
5333 RegWidth = 64;
5334 else
5335 RegWidth = 32;
5336
5337 if (LSB >= RegWidth)
5338 return Error(LSBOp.getStartLoc(),
5339 "expected integer in range [0, 31]");
5340 if (Width < 1 || Width > RegWidth)
5341 return Error(WidthOp.getStartLoc(),
5342 "expected integer in range [1, 32]");
5343
5344 uint64_t ImmR = 0;
5345 if (RegWidth == 32)
5346 ImmR = (32 - LSB) & 0x1f;
5347 else
5348 ImmR = (64 - LSB) & 0x3f;
5349
5350 uint64_t ImmS = Width - 1;
5351
5352 if (ImmR != 0 && ImmS >= ImmR)
5353 return Error(WidthOp.getStartLoc(),
5354 "requested insert overflows register");
5355
5356 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
5357 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
5358 Operands[0] =
5359 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
5360 Operands[2] = AArch64Operand::CreateReg(
5361 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
5362 SMLoc(), SMLoc(), getContext());
5363 Operands[3] = AArch64Operand::CreateImm(
5364 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
5365 Operands.emplace_back(
5366 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
5367 WidthOp.getEndLoc(), getContext()));
5368 }
5369 }
5370 } else if (NumOperands == 5) {
5371 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
5372 // UBFIZ -> UBFM aliases.
5373 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
5374 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5375 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5376 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5377
5378 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5379 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5380 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5381
5382 if (Op3CE && Op4CE) {
5383 uint64_t Op3Val = Op3CE->getValue();
5384 uint64_t Op4Val = Op4CE->getValue();
5385
5386 uint64_t RegWidth = 0;
5387 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5388 Op1.getReg()))
5389 RegWidth = 64;
5390 else
5391 RegWidth = 32;
5392
5393 if (Op3Val >= RegWidth)
5394 return Error(Op3.getStartLoc(),
5395 "expected integer in range [0, 31]");
5396 if (Op4Val < 1 || Op4Val > RegWidth)
5397 return Error(Op4.getStartLoc(),
5398 "expected integer in range [1, 32]");
5399
5400 uint64_t NewOp3Val = 0;
5401 if (RegWidth == 32)
5402 NewOp3Val = (32 - Op3Val) & 0x1f;
5403 else
5404 NewOp3Val = (64 - Op3Val) & 0x3f;
5405
5406 uint64_t NewOp4Val = Op4Val - 1;
5407
5408 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
5409 return Error(Op4.getStartLoc(),
5410 "requested insert overflows register");
5411
5412 const MCExpr *NewOp3 =
5413 MCConstantExpr::create(NewOp3Val, getContext());
5414 const MCExpr *NewOp4 =
5415 MCConstantExpr::create(NewOp4Val, getContext());
5416 Operands[3] = AArch64Operand::CreateImm(
5417 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
5418 Operands[4] = AArch64Operand::CreateImm(
5419 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5420 if (Tok == "bfi")
5421 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5422 getContext());
5423 else if (Tok == "sbfiz")
5424 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5425 getContext());
5426 else if (Tok == "ubfiz")
5427 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5428 getContext());
5429 else
5430 llvm_unreachable("No valid mnemonic for alias?")__builtin_unreachable();
5431 }
5432 }
5433
5434 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
5435 // UBFX -> UBFM aliases.
5436 } else if (NumOperands == 5 &&
5437 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
5438 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5439 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5440 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5441
5442 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5443 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5444 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5445
5446 if (Op3CE && Op4CE) {
5447 uint64_t Op3Val = Op3CE->getValue();
5448 uint64_t Op4Val = Op4CE->getValue();
5449
5450 uint64_t RegWidth = 0;
5451 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5452 Op1.getReg()))
5453 RegWidth = 64;
5454 else
5455 RegWidth = 32;
5456
5457 if (Op3Val >= RegWidth)
5458 return Error(Op3.getStartLoc(),
5459 "expected integer in range [0, 31]");
5460 if (Op4Val < 1 || Op4Val > RegWidth)
5461 return Error(Op4.getStartLoc(),
5462 "expected integer in range [1, 32]");
5463
5464 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
5465
5466 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
5467 return Error(Op4.getStartLoc(),
5468 "requested extract overflows register");
5469
5470 const MCExpr *NewOp4 =
5471 MCConstantExpr::create(NewOp4Val, getContext());
5472 Operands[4] = AArch64Operand::CreateImm(
5473 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5474 if (Tok == "bfxil")
5475 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5476 getContext());
5477 else if (Tok == "sbfx")
5478 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5479 getContext());
5480 else if (Tok == "ubfx")
5481 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5482 getContext());
5483 else
5484 llvm_unreachable("No valid mnemonic for alias?")__builtin_unreachable();
5485 }
5486 }
5487 }
5488 }
5489
5490 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
5491 // instruction for FP registers correctly in some rare circumstances. Convert
5492 // it to a safe instruction and warn (because silently changing someone's
5493 // assembly is rude).
5494 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
5495 NumOperands == 4 && Tok == "movi") {
5496 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5497 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5498 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5499 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
5500 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
5501 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
5502 if (Suffix.lower() == ".2d" &&
5503 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
5504 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
5505 " correctly on this CPU, converting to equivalent movi.16b");
5506 // Switch the suffix to .16b.
5507 unsigned Idx = Op1.isToken() ? 1 : 2;
5508 Operands[Idx] =
5509 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
5510 }
5511 }
5512 }
5513
5514 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
5515 // InstAlias can't quite handle this since the reg classes aren't
5516 // subclasses.
5517 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
5518 // The source register can be Wn here, but the matcher expects a
5519 // GPR64. Twiddle it here if necessary.
5520 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5521 if (Op.isScalarReg()) {
5522 unsigned Reg = getXRegFromWReg(Op.getReg());
5523 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5524 Op.getStartLoc(), Op.getEndLoc(),
5525 getContext());
5526 }
5527 }
5528 // FIXME: Likewise for sxt[bh] with a Xd dst operand
5529 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5530 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5531 if (Op.isScalarReg() &&
5532 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5533 Op.getReg())) {
5534 // The source register can be Wn here, but the matcher expects a
5535 // GPR64. Twiddle it here if necessary.
5536 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5537 if (Op.isScalarReg()) {
5538 unsigned Reg = getXRegFromWReg(Op.getReg());
5539 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5540 Op.getStartLoc(),
5541 Op.getEndLoc(), getContext());
5542 }
5543 }
5544 }
5545 // FIXME: Likewise for uxt[bh] with a Xd dst operand
5546 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5547 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5548 if (Op.isScalarReg() &&
5549 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5550 Op.getReg())) {
5551 // The source register can be Wn here, but the matcher expects a
5552 // GPR32. Twiddle it here if necessary.
5553 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5554 if (Op.isScalarReg()) {
5555 unsigned Reg = getWRegFromXReg(Op.getReg());
5556 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5557 Op.getStartLoc(),
5558 Op.getEndLoc(), getContext());
5559 }
5560 }
5561 }
5562
5563 MCInst Inst;
5564 FeatureBitset MissingFeatures;
5565 // First try to match against the secondary set of tables containing the
5566 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5567 unsigned MatchResult =
5568 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5569 MatchingInlineAsm, 1);
5570
5571 // If that fails, try against the alternate table containing long-form NEON:
5572 // "fadd v0.2s, v1.2s, v2.2s"
5573 if (MatchResult != Match_Success) {
5574 // But first, save the short-form match result: we can use it in case the
5575 // long-form match also fails.
5576 auto ShortFormNEONErrorInfo = ErrorInfo;
5577 auto ShortFormNEONMatchResult = MatchResult;
5578 auto ShortFormNEONMissingFeatures = MissingFeatures;
5579
5580 MatchResult =
5581 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5582 MatchingInlineAsm, 0);
5583
5584 // Now, both matches failed, and the long-form match failed on the mnemonic
5585 // suffix token operand. The short-form match failure is probably more
5586 // relevant: use it instead.
5587 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5588 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5589 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5590 MatchResult = ShortFormNEONMatchResult;
5591 ErrorInfo = ShortFormNEONErrorInfo;
5592 MissingFeatures = ShortFormNEONMissingFeatures;
5593 }
5594 }
5595
5596 switch (MatchResult) {
5597 case Match_Success: {
5598 // Perform range checking and other semantic validations
5599 SmallVector<SMLoc, 8> OperandLocs;
5600 NumOperands = Operands.size();
5601 for (unsigned i = 1; i < NumOperands; ++i)
5602 OperandLocs.push_back(Operands[i]->getStartLoc());
5603 if (validateInstruction(Inst, IDLoc, OperandLocs))
5604 return true;
5605
5606 Inst.setLoc(IDLoc);
5607 Out.emitInstruction(Inst, getSTI());
5608 return false;
5609 }
5610 case Match_MissingFeature: {
5611 assert(MissingFeatures.any() && "Unknown missing feature!")(static_cast<void> (0));
5612 // Special case the error message for the very common case where only
5613 // a single subtarget feature is missing (neon, e.g.).
5614 std::string Msg = "instruction requires:";
5615 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5616 if (MissingFeatures[i]) {
5617 Msg += " ";
5618 Msg += getSubtargetFeatureName(i);
5619 }
5620 }
5621 return Error(IDLoc, Msg);
5622 }
5623 case Match_MnemonicFail:
5624 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5625 case Match_InvalidOperand: {
5626 SMLoc ErrorLoc = IDLoc;
5627
5628 if (ErrorInfo != ~0ULL) {
5629 if (ErrorInfo >= Operands.size())
5630 return Error(IDLoc, "too few operands for instruction",
5631 SMRange(IDLoc, getTok().getLoc()));
5632
5633 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5634 if (ErrorLoc == SMLoc())
5635 ErrorLoc = IDLoc;
5636 }
5637 // If the match failed on a suffix token operand, tweak the diagnostic
5638 // accordingly.
5639 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5640 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5641 MatchResult = Match_InvalidSuffix;
5642
5643 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5644 }
5645 case Match_InvalidTiedOperand:
5646 case Match_InvalidMemoryIndexed1:
5647 case Match_InvalidMemoryIndexed2:
5648 case Match_InvalidMemoryIndexed4:
5649 case Match_InvalidMemoryIndexed8:
5650 case Match_InvalidMemoryIndexed16:
5651 case Match_InvalidCondCode:
5652 case Match_AddSubRegExtendSmall:
5653 case Match_AddSubRegExtendLarge:
5654 case Match_AddSubSecondSource:
5655 case Match_LogicalSecondSource:
5656 case Match_AddSubRegShift32:
5657 case Match_AddSubRegShift64:
5658 case Match_InvalidMovImm32Shift:
5659 case Match_InvalidMovImm64Shift:
5660 case Match_InvalidFPImm:
5661 case Match_InvalidMemoryWExtend8:
5662 case Match_InvalidMemoryWExtend16:
5663 case Match_InvalidMemoryWExtend32:
5664 case Match_InvalidMemoryWExtend64:
5665 case Match_InvalidMemoryWExtend128:
5666 case Match_InvalidMemoryXExtend8:
5667 case Match_InvalidMemoryXExtend16:
5668 case Match_InvalidMemoryXExtend32:
5669 case Match_InvalidMemoryXExtend64:
5670 case Match_InvalidMemoryXExtend128:
5671 case Match_InvalidMemoryIndexed1SImm4:
5672 case Match_InvalidMemoryIndexed2SImm4:
5673 case Match_InvalidMemoryIndexed3SImm4:
5674 case Match_InvalidMemoryIndexed4SImm4:
5675 case Match_InvalidMemoryIndexed1SImm6:
5676 case Match_InvalidMemoryIndexed16SImm4:
5677 case Match_InvalidMemoryIndexed32SImm4:
5678 case Match_InvalidMemoryIndexed4SImm7:
5679 case Match_InvalidMemoryIndexed8SImm7:
5680 case Match_InvalidMemoryIndexed16SImm7:
5681 case Match_InvalidMemoryIndexed8UImm5:
5682 case Match_InvalidMemoryIndexed4UImm5:
5683 case Match_InvalidMemoryIndexed2UImm5:
5684 case Match_InvalidMemoryIndexed1UImm6:
5685 case Match_InvalidMemoryIndexed2UImm6:
5686 case Match_InvalidMemoryIndexed4UImm6:
5687 case Match_InvalidMemoryIndexed8UImm6:
5688 case Match_InvalidMemoryIndexed16UImm6:
5689 case Match_InvalidMemoryIndexedSImm6:
5690 case Match_InvalidMemoryIndexedSImm5:
5691 case Match_InvalidMemoryIndexedSImm8:
5692 case Match_InvalidMemoryIndexedSImm9:
5693 case Match_InvalidMemoryIndexed16SImm9:
5694 case Match_InvalidMemoryIndexed8SImm10:
5695 case Match_InvalidImm0_1:
5696 case Match_InvalidImm0_3:
5697 case Match_InvalidImm0_7:
5698 case Match_InvalidImm0_15:
5699 case Match_InvalidImm0_31:
5700 case Match_InvalidImm0_63:
5701 case Match_InvalidImm0_127:
5702 case Match_InvalidImm0_255:
5703 case Match_InvalidImm0_65535:
5704 case Match_InvalidImm1_8:
5705 case Match_InvalidImm1_16:
5706 case Match_InvalidImm1_32:
5707 case Match_InvalidImm1_64:
5708 case Match_InvalidSVEAddSubImm8:
5709 case Match_InvalidSVEAddSubImm16:
5710 case Match_InvalidSVEAddSubImm32:
5711 case Match_InvalidSVEAddSubImm64:
5712 case Match_InvalidSVECpyImm8:
5713 case Match_InvalidSVECpyImm16:
5714 case Match_InvalidSVECpyImm32:
5715 case Match_InvalidSVECpyImm64:
5716 case Match_InvalidIndexRange0_0:
5717 case Match_InvalidIndexRange1_1:
5718 case Match_InvalidIndexRange0_15:
5719 case Match_InvalidIndexRange0_7:
5720 case Match_InvalidIndexRange0_3:
5721 case Match_InvalidIndexRange0_1:
5722 case Match_InvalidSVEIndexRange0_63:
5723 case Match_InvalidSVEIndexRange0_31:
5724 case Match_InvalidSVEIndexRange0_15:
5725 case Match_InvalidSVEIndexRange0_7:
5726 case Match_InvalidSVEIndexRange0_3:
5727 case Match_InvalidLabel:
5728 case Match_InvalidComplexRotationEven:
5729 case Match_InvalidComplexRotationOdd:
5730 case Match_InvalidGPR64shifted8:
5731 case Match_InvalidGPR64shifted16:
5732 case Match_InvalidGPR64shifted32:
5733 case Match_InvalidGPR64shifted64:
5734 case Match_InvalidGPR64shifted128:
5735 case Match_InvalidGPR64NoXZRshifted8:
5736 case Match_InvalidGPR64NoXZRshifted16:
5737 case Match_InvalidGPR64NoXZRshifted32:
5738 case Match_InvalidGPR64NoXZRshifted64:
5739 case Match_InvalidGPR64NoXZRshifted128:
5740 case Match_InvalidZPR32UXTW8:
5741 case Match_InvalidZPR32UXTW16:
5742 case Match_InvalidZPR32UXTW32:
5743 case Match_InvalidZPR32UXTW64:
5744 case Match_InvalidZPR32SXTW8:
5745 case Match_InvalidZPR32SXTW16:
5746 case Match_InvalidZPR32SXTW32:
5747 case Match_InvalidZPR32SXTW64:
5748 case Match_InvalidZPR64UXTW8:
5749 case Match_InvalidZPR64SXTW8:
5750 case Match_InvalidZPR64UXTW16:
5751 case Match_InvalidZPR64SXTW16:
5752 case Match_InvalidZPR64UXTW32:
5753 case Match_InvalidZPR64SXTW32:
5754 case Match_InvalidZPR64UXTW64:
5755 case Match_InvalidZPR64SXTW64:
5756 case Match_InvalidZPR32LSL8:
5757 case Match_InvalidZPR32LSL16:
5758 case Match_InvalidZPR32LSL32:
5759 case Match_InvalidZPR32LSL64:
5760 case Match_InvalidZPR64LSL8:
5761 case Match_InvalidZPR64LSL16:
5762 case Match_InvalidZPR64LSL32:
5763 case Match_InvalidZPR64LSL64:
5764 case Match_InvalidZPR0:
5765 case Match_InvalidZPR8:
5766 case Match_InvalidZPR16:
5767 case Match_InvalidZPR32:
5768 case Match_InvalidZPR64:
5769 case Match_InvalidZPR128:
5770 case Match_InvalidZPR_3b8:
5771 case Match_InvalidZPR_3b16:
5772 case Match_InvalidZPR_3b32:
5773 case Match_InvalidZPR_4b16:
5774 case Match_InvalidZPR_4b32:
5775 case Match_InvalidZPR_4b64:
5776 case Match_InvalidSVEPredicateAnyReg:
5777 case Match_InvalidSVEPattern:
5778 case Match_InvalidSVEPredicateBReg:
5779 case Match_InvalidSVEPredicateHReg:
5780 case Match_InvalidSVEPredicateSReg:
5781 case Match_InvalidSVEPredicateDReg:
5782 case Match_InvalidSVEPredicate3bAnyReg:
5783 case Match_InvalidSVEExactFPImmOperandHalfOne:
5784 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5785 case Match_InvalidSVEExactFPImmOperandZeroOne:
5786 case Match_InvalidMatrixTile32:
5787 case Match_InvalidMatrixTile64:
5788 case Match_InvalidMatrix:
5789 case Match_InvalidMatrixTileVectorH8:
5790 case Match_InvalidMatrixTileVectorH16:
5791 case Match_InvalidMatrixTileVectorH32:
5792 case Match_InvalidMatrixTileVectorH64:
5793 case Match_InvalidMatrixTileVectorH128:
5794 case Match_InvalidMatrixTileVectorV8:
5795 case Match_InvalidMatrixTileVectorV16:
5796 case Match_InvalidMatrixTileVectorV32:
5797 case Match_InvalidMatrixTileVectorV64:
5798 case Match_InvalidMatrixTileVectorV128:
5799 case Match_InvalidSVCR:
5800 case Match_InvalidMatrixIndexGPR32_12_15:
5801 case Match_MSR:
5802 case Match_MRS: {
5803 if (ErrorInfo >= Operands.size())
5804 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5805 // Any time we get here, there's nothing fancy to do. Just get the
5806 // operand SMLoc and display the diagnostic.
5807 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5808 if (ErrorLoc == SMLoc())
5809 ErrorLoc = IDLoc;
5810 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5811 }
5812 }
5813
5814 llvm_unreachable("Implement any new match types added!")__builtin_unreachable();
5815}
5816
5817/// ParseDirective parses the arm specific directives
5818bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5819 const MCContext::Environment Format = getContext().getObjectFileType();
5820 bool IsMachO = Format == MCContext::IsMachO;
1
Assuming 'Format' is not equal to IsMachO
5821 bool IsCOFF = Format == MCContext::IsCOFF;
2
Assuming 'Format' is equal to IsCOFF
5822
5823 auto IDVal = DirectiveID.getIdentifier().lower();
5824 SMLoc Loc = DirectiveID.getLoc();
5825 if (IDVal == ".arch")
3
Taking false branch
5826 parseDirectiveArch(Loc);
5827 else if (IDVal == ".cpu")
4
Taking false branch
5828 parseDirectiveCPU(Loc);
5829 else if (IDVal == ".tlsdesccall")
5830 parseDirectiveTLSDescCall(Loc);
5831 else if (IDVal == ".ltorg" || IDVal == ".pool")
5
Taking false branch
5832 parseDirectiveLtorg(Loc);
5833 else if (IDVal == ".unreq")
6
Taking false branch
5834 parseDirectiveUnreq(Loc);
5835 else if (IDVal == ".inst")
7
Taking false branch
5836 parseDirectiveInst(Loc);
5837 else if (IDVal == ".cfi_negate_ra_state")
8
Taking false branch
5838 parseDirectiveCFINegateRAState();
5839 else if (IDVal == ".cfi_b_key_frame")
9
Taking false branch
5840 parseDirectiveCFIBKeyFrame();
5841 else if (IDVal == ".arch_extension")
10
Taking false branch
5842 parseDirectiveArchExtension(Loc);
5843 else if (IDVal == ".variant_pcs")
11
Taking false branch
5844 parseDirectiveVariantPCS(Loc);
5845 else if (IsMachO
11.1
'IsMachO' is false
) {
12
Taking false branch
5846 if (IDVal == MCLOHDirectiveName())
5847 parseDirectiveLOH(IDVal, Loc);
5848 else
5849 return true;
5850 } else if (IsCOFF
12.1
'IsCOFF' is true
) {
13
Taking true branch
5851 if (IDVal == ".seh_stackalloc")
14
Taking false branch
5852 parseDirectiveSEHAllocStack(Loc);
5853 else if (IDVal == ".seh_endprologue")
15
Taking false branch
5854 parseDirectiveSEHPrologEnd(Loc);
5855 else if (IDVal == ".seh_save_r19r20_x")
16
Taking false branch
5856 parseDirectiveSEHSaveR19R20X(Loc);
5857 else if (IDVal == ".seh_save_fplr")
17
Taking false branch
5858 parseDirectiveSEHSaveFPLR(Loc);
5859 else if (IDVal == ".seh_save_fplr_x")
18
Taking false branch
5860 parseDirectiveSEHSaveFPLRX(Loc);