Bug Summary

File: llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 5309, column 34
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/AArch64/AsmParser/.. 
-I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-03-09-184146-41876-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64MCExpr.h"
11#include "MCTargetDesc/AArch64MCTargetDesc.h"
12#include "MCTargetDesc/AArch64TargetStreamer.h"
13#include "TargetInfo/AArch64TargetInfo.h"
14#include "AArch64InstrInfo.h"
15#include "Utils/AArch64BaseInfo.h"
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/MC/MCContext.h"
27#include "llvm/MC/MCExpr.h"
28#include "llvm/MC/MCInst.h"
29#include "llvm/MC/MCLinkerOptimizationHint.h"
30#include "llvm/MC/MCObjectFileInfo.h"
31#include "llvm/MC/MCParser/MCAsmLexer.h"
32#include "llvm/MC/MCParser/MCAsmParser.h"
33#include "llvm/MC/MCParser/MCAsmParserExtension.h"
34#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35#include "llvm/MC/MCParser/MCTargetAsmParser.h"
36#include "llvm/MC/MCRegisterInfo.h"
37#include "llvm/MC/MCStreamer.h"
38#include "llvm/MC/MCSubtargetInfo.h"
39#include "llvm/MC/MCSymbol.h"
40#include "llvm/MC/MCTargetOptions.h"
41#include "llvm/MC/SubtargetFeature.h"
42#include "llvm/MC/MCValue.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/Compiler.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/MathExtras.h"
47#include "llvm/Support/SMLoc.h"
48#include "llvm/Support/TargetParser.h"
49#include "llvm/Support/TargetRegistry.h"
50#include "llvm/Support/raw_ostream.h"
51#include <cassert>
52#include <cctype>
53#include <cstdint>
54#include <cstdio>
55#include <string>
56#include <tuple>
57#include <utility>
58#include <vector>
59
60using namespace llvm;
61
62namespace {
63
64enum class RegKind {
65 Scalar,
66 NeonVector,
67 SVEDataVector,
68 SVEPredicateVector
69};
70
71enum RegConstraintEqualityTy {
72 EqualsReg,
73 EqualsSuperReg,
74 EqualsSubReg
75};
76
77class AArch64AsmParser : public MCTargetAsmParser {
78private:
79 StringRef Mnemonic; ///< Instruction mnemonic.
80
81 // Map of register aliases registers via the .req directive.
82 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
83
84 class PrefixInfo {
85 public:
86 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
87 PrefixInfo Prefix;
88 switch (Inst.getOpcode()) {
89 case AArch64::MOVPRFX_ZZ:
90 Prefix.Active = true;
91 Prefix.Dst = Inst.getOperand(0).getReg();
92 break;
93 case AArch64::MOVPRFX_ZPmZ_B:
94 case AArch64::MOVPRFX_ZPmZ_H:
95 case AArch64::MOVPRFX_ZPmZ_S:
96 case AArch64::MOVPRFX_ZPmZ_D:
97 Prefix.Active = true;
98 Prefix.Predicated = true;
99 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
100 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 101, __PRETTY_FUNCTION__))
101 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 101, __PRETTY_FUNCTION__))
;
102 Prefix.Dst = Inst.getOperand(0).getReg();
103 Prefix.Pg = Inst.getOperand(2).getReg();
104 break;
105 case AArch64::MOVPRFX_ZPzZ_B:
106 case AArch64::MOVPRFX_ZPzZ_H:
107 case AArch64::MOVPRFX_ZPzZ_S:
108 case AArch64::MOVPRFX_ZPzZ_D:
109 Prefix.Active = true;
110 Prefix.Predicated = true;
111 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
112 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 113, __PRETTY_FUNCTION__))
113 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 113, __PRETTY_FUNCTION__))
;
114 Prefix.Dst = Inst.getOperand(0).getReg();
115 Prefix.Pg = Inst.getOperand(1).getReg();
116 break;
117 default:
118 break;
119 }
120
121 return Prefix;
122 }
123
124 PrefixInfo() : Active(false), Predicated(false) {}
125 bool isActive() const { return Active; }
126 bool isPredicated() const { return Predicated; }
127 unsigned getElementSize() const {
128 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 128, __PRETTY_FUNCTION__))
;
129 return ElementSize;
130 }
131 unsigned getDstReg() const { return Dst; }
132 unsigned getPgReg() const {
133 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 133, __PRETTY_FUNCTION__))
;
134 return Pg;
135 }
136
137 private:
138 bool Active;
139 bool Predicated;
140 unsigned ElementSize;
141 unsigned Dst;
142 unsigned Pg;
143 } NextPrefix;
144
145 AArch64TargetStreamer &getTargetStreamer() {
146 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
147 return static_cast<AArch64TargetStreamer &>(TS);
148 }
149
150 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
151
152 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
153 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
154 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
155 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
156 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
157 bool parseRegister(OperandVector &Operands);
158 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
159 bool parseNeonVectorList(OperandVector &Operands);
160 bool parseOptionalMulOperand(OperandVector &Operands);
161 bool parseOperand(OperandVector &Operands, bool isCondCode,
162 bool invertCondCode);
163
164 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
165 OperandVector &Operands);
166
167 bool parseDirectiveArch(SMLoc L);
168 bool parseDirectiveArchExtension(SMLoc L);
169 bool parseDirectiveCPU(SMLoc L);
170 bool parseDirectiveInst(SMLoc L);
171
172 bool parseDirectiveTLSDescCall(SMLoc L);
173
174 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
175 bool parseDirectiveLtorg(SMLoc L);
176
177 bool parseDirectiveReq(StringRef Name, SMLoc L);
178 bool parseDirectiveUnreq(SMLoc L);
179 bool parseDirectiveCFINegateRAState();
180 bool parseDirectiveCFIBKeyFrame();
181
182 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
183 SmallVectorImpl<SMLoc> &Loc);
184 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
185 OperandVector &Operands, MCStreamer &Out,
186 uint64_t &ErrorInfo,
187 bool MatchingInlineAsm) override;
188/// @name Auto-generated Match Functions
189/// {
190
191#define GET_ASSEMBLER_HEADER
192#include "AArch64GenAsmMatcher.inc"
193
194 /// }
195
196 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
197 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
198 RegKind MatchKind);
199 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
200 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
201 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
202 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
203 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
204 template <bool IsSVEPrefetch = false>
205 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
206 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
207 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
208 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
209 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
210 template<bool AddFPZeroAsLiteral>
211 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
212 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
213 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
214 bool tryParseNeonVectorRegister(OperandVector &Operands);
215 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
216 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
217 template <bool ParseShiftExtend,
218 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
219 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
220 template <bool ParseShiftExtend, bool ParseSuffix>
221 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
222 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
223 template <RegKind VectorKind>
224 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
225 bool ExpectMatch = false);
226 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
227
228public:
229 enum AArch64MatchResultTy {
230 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
231#define GET_OPERAND_DIAGNOSTIC_TYPES
232#include "AArch64GenAsmMatcher.inc"
233 };
234 bool IsILP32;
235
236 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
237 const MCInstrInfo &MII, const MCTargetOptions &Options)
238 : MCTargetAsmParser(Options, STI, MII) {
239 IsILP32 = Options.getABIName() == "ilp32";
240 MCAsmParserExtension::Initialize(Parser);
241 MCStreamer &S = getParser().getStreamer();
242 if (S.getTargetStreamer() == nullptr)
243 new AArch64TargetStreamer(S);
244
245 // Alias .hword/.word/.[dx]word to the target-independent
246 // .2byte/.4byte/.8byte directives as they have the same form and
247 // semantics:
248 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
249 Parser.addAliasForDirective(".hword", ".2byte");
250 Parser.addAliasForDirective(".word", ".4byte");
251 Parser.addAliasForDirective(".dword", ".8byte");
252 Parser.addAliasForDirective(".xword", ".8byte");
253
254 // Initialize the set of available features.
255 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
256 }
257
258 bool regsEqual(const MCParsedAsmOperand &Op1,
259 const MCParsedAsmOperand &Op2) const override;
260 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
261 SMLoc NameLoc, OperandVector &Operands) override;
262 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
263 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
264 SMLoc &EndLoc) override;
265 bool ParseDirective(AsmToken DirectiveID) override;
266 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
267 unsigned Kind) override;
268
269 static bool classifySymbolRef(const MCExpr *Expr,
270 AArch64MCExpr::VariantKind &ELFRefKind,
271 MCSymbolRefExpr::VariantKind &DarwinRefKind,
272 int64_t &Addend);
273};
274
275/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
276/// instruction.
277class AArch64Operand : public MCParsedAsmOperand {
278private:
279 enum KindTy {
280 k_Immediate,
281 k_ShiftedImm,
282 k_CondCode,
283 k_Register,
284 k_VectorList,
285 k_VectorIndex,
286 k_Token,
287 k_SysReg,
288 k_SysCR,
289 k_Prefetch,
290 k_ShiftExtend,
291 k_FPImm,
292 k_Barrier,
293 k_PSBHint,
294 k_BTIHint,
295 } Kind;
296
297 SMLoc StartLoc, EndLoc;
298
299 struct TokOp {
300 const char *Data;
301 unsigned Length;
302 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
303 };
304
305 // Separate shift/extend operand.
306 struct ShiftExtendOp {
307 AArch64_AM::ShiftExtendType Type;
308 unsigned Amount;
309 bool HasExplicitAmount;
310 };
311
312 struct RegOp {
313 unsigned RegNum;
314 RegKind Kind;
315 int ElementWidth;
316
317 // The register may be allowed as a different register class,
318 // e.g. for GPR64as32 or GPR32as64.
319 RegConstraintEqualityTy EqualityTy;
320
321 // In some cases the shift/extend needs to be explicitly parsed together
322 // with the register, rather than as a separate operand. This is needed
323 // for addressing modes where the instruction as a whole dictates the
324 // scaling/extend, rather than specific bits in the instruction.
325 // By parsing them as a single operand, we avoid the need to pass an
326 // extra operand in all CodeGen patterns (because all operands need to
327 // have an associated value), and we avoid the need to update TableGen to
328 // accept operands that have no associated bits in the instruction.
329 //
330 // An added benefit of parsing them together is that the assembler
331 // can give a sensible diagnostic if the scaling is not correct.
332 //
333 // The default is 'lsl #0' (HasExplicitAmount = false) if no
334 // ShiftExtend is specified.
335 ShiftExtendOp ShiftExtend;
336 };
337
338 struct VectorListOp {
339 unsigned RegNum;
340 unsigned Count;
341 unsigned NumElements;
342 unsigned ElementWidth;
343 RegKind RegisterKind;
344 };
345
346 struct VectorIndexOp {
347 unsigned Val;
348 };
349
350 struct ImmOp {
351 const MCExpr *Val;
352 };
353
354 struct ShiftedImmOp {
355 const MCExpr *Val;
356 unsigned ShiftAmount;
357 };
358
359 struct CondCodeOp {
360 AArch64CC::CondCode Code;
361 };
362
363 struct FPImmOp {
364 uint64_t Val; // APFloat value bitcasted to uint64_t.
365 bool IsExact; // describes whether parsed value was exact.
366 };
367
368 struct BarrierOp {
369 const char *Data;
370 unsigned Length;
371 unsigned Val; // Not the enum since not all values have names.
372 };
373
374 struct SysRegOp {
375 const char *Data;
376 unsigned Length;
377 uint32_t MRSReg;
378 uint32_t MSRReg;
379 uint32_t PStateField;
380 };
381
382 struct SysCRImmOp {
383 unsigned Val;
384 };
385
386 struct PrefetchOp {
387 const char *Data;
388 unsigned Length;
389 unsigned Val;
390 };
391
392 struct PSBHintOp {
393 const char *Data;
394 unsigned Length;
395 unsigned Val;
396 };
397
398 struct BTIHintOp {
399 const char *Data;
400 unsigned Length;
401 unsigned Val;
402 };
403
404 struct ExtendOp {
405 unsigned Val;
406 };
407
408 union {
409 struct TokOp Tok;
410 struct RegOp Reg;
411 struct VectorListOp VectorList;
412 struct VectorIndexOp VectorIndex;
413 struct ImmOp Imm;
414 struct ShiftedImmOp ShiftedImm;
415 struct CondCodeOp CondCode;
416 struct FPImmOp FPImm;
417 struct BarrierOp Barrier;
418 struct SysRegOp SysReg;
419 struct SysCRImmOp SysCRImm;
420 struct PrefetchOp Prefetch;
421 struct PSBHintOp PSBHint;
422 struct BTIHintOp BTIHint;
423 struct ShiftExtendOp ShiftExtend;
424 };
425
426 // Keep the MCContext around as the MCExprs may need manipulated during
427 // the add<>Operands() calls.
428 MCContext &Ctx;
429
430public:
431 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
432
433 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
434 Kind = o.Kind;
435 StartLoc = o.StartLoc;
436 EndLoc = o.EndLoc;
437 switch (Kind) {
438 case k_Token:
439 Tok = o.Tok;
440 break;
441 case k_Immediate:
442 Imm = o.Imm;
443 break;
444 case k_ShiftedImm:
445 ShiftedImm = o.ShiftedImm;
446 break;
447 case k_CondCode:
448 CondCode = o.CondCode;
449 break;
450 case k_FPImm:
451 FPImm = o.FPImm;
452 break;
453 case k_Barrier:
454 Barrier = o.Barrier;
455 break;
456 case k_Register:
457 Reg = o.Reg;
458 break;
459 case k_VectorList:
460 VectorList = o.VectorList;
461 break;
462 case k_VectorIndex:
463 VectorIndex = o.VectorIndex;
464 break;
465 case k_SysReg:
466 SysReg = o.SysReg;
467 break;
468 case k_SysCR:
469 SysCRImm = o.SysCRImm;
470 break;
471 case k_Prefetch:
472 Prefetch = o.Prefetch;
473 break;
474 case k_PSBHint:
475 PSBHint = o.PSBHint;
476 break;
477 case k_BTIHint:
478 BTIHint = o.BTIHint;
479 break;
480 case k_ShiftExtend:
481 ShiftExtend = o.ShiftExtend;
482 break;
483 }
484 }
485
486 /// getStartLoc - Get the location of the first token of this operand.
487 SMLoc getStartLoc() const override { return StartLoc; }
488 /// getEndLoc - Get the location of the last token of this operand.
489 SMLoc getEndLoc() const override { return EndLoc; }
490
491 StringRef getToken() const {
492 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 492, __PRETTY_FUNCTION__))
;
493 return StringRef(Tok.Data, Tok.Length);
494 }
495
496 bool isTokenSuffix() const {
497 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 497, __PRETTY_FUNCTION__))
;
498 return Tok.IsSuffix;
499 }
500
501 const MCExpr *getImm() const {
502 assert(Kind == k_Immediate && "Invalid access!")((Kind == k_Immediate && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 502, __PRETTY_FUNCTION__))
;
503 return Imm.Val;
504 }
505
506 const MCExpr *getShiftedImmVal() const {
507 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 507, __PRETTY_FUNCTION__))
;
508 return ShiftedImm.Val;
509 }
510
511 unsigned getShiftedImmShift() const {
512 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 512, __PRETTY_FUNCTION__))
;
513 return ShiftedImm.ShiftAmount;
514 }
515
516 AArch64CC::CondCode getCondCode() const {
517 assert(Kind == k_CondCode && "Invalid access!")((Kind == k_CondCode && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 517, __PRETTY_FUNCTION__))
;
518 return CondCode.Code;
519 }
520
521 APFloat getFPImm() const {
522 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 522, __PRETTY_FUNCTION__))
;
523 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
524 }
525
526 bool getFPImmIsExact() const {
527 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 527, __PRETTY_FUNCTION__))
;
528 return FPImm.IsExact;
529 }
530
531 unsigned getBarrier() const {
532 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 532, __PRETTY_FUNCTION__))
;
533 return Barrier.Val;
534 }
535
536 StringRef getBarrierName() const {
537 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 537, __PRETTY_FUNCTION__))
;
538 return StringRef(Barrier.Data, Barrier.Length);
539 }
540
541 unsigned getReg() const override {
542 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 542, __PRETTY_FUNCTION__))
;
543 return Reg.RegNum;
544 }
545
546 RegConstraintEqualityTy getRegEqualityTy() const {
547 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 547, __PRETTY_FUNCTION__))
;
548 return Reg.EqualityTy;
549 }
550
551 unsigned getVectorListStart() const {
552 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 552, __PRETTY_FUNCTION__))
;
553 return VectorList.RegNum;
554 }
555
556 unsigned getVectorListCount() const {
557 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 557, __PRETTY_FUNCTION__))
;
558 return VectorList.Count;
559 }
560
561 unsigned getVectorIndex() const {
562 assert(Kind == k_VectorIndex && "Invalid access!")((Kind == k_VectorIndex && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 562, __PRETTY_FUNCTION__))
;
563 return VectorIndex.Val;
564 }
565
566 StringRef getSysReg() const {
567 assert(Kind == k_SysReg && "Invalid access!")((Kind == k_SysReg && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 567, __PRETTY_FUNCTION__))
;
568 return StringRef(SysReg.Data, SysReg.Length);
569 }
570
571 unsigned getSysCR() const {
572 assert(Kind == k_SysCR && "Invalid access!")((Kind == k_SysCR && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 572, __PRETTY_FUNCTION__))
;
573 return SysCRImm.Val;
574 }
575
576 unsigned getPrefetch() const {
577 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 577, __PRETTY_FUNCTION__))
;
578 return Prefetch.Val;
579 }
580
581 unsigned getPSBHint() const {
582 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 582, __PRETTY_FUNCTION__))
;
583 return PSBHint.Val;
584 }
585
586 StringRef getPSBHintName() const {
587 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 587, __PRETTY_FUNCTION__))
;
588 return StringRef(PSBHint.Data, PSBHint.Length);
589 }
590
591 unsigned getBTIHint() const {
592 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 592, __PRETTY_FUNCTION__))
;
593 return BTIHint.Val;
594 }
595
596 StringRef getBTIHintName() const {
597 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 597, __PRETTY_FUNCTION__))
;
598 return StringRef(BTIHint.Data, BTIHint.Length);
599 }
600
601 StringRef getPrefetchName() const {
602 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 602, __PRETTY_FUNCTION__))
;
603 return StringRef(Prefetch.Data, Prefetch.Length);
604 }
605
606 AArch64_AM::ShiftExtendType getShiftExtendType() const {
607 if (Kind == k_ShiftExtend)
608 return ShiftExtend.Type;
609 if (Kind == k_Register)
610 return Reg.ShiftExtend.Type;
611 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 611)
;
612 }
613
614 unsigned getShiftExtendAmount() const {
615 if (Kind == k_ShiftExtend)
616 return ShiftExtend.Amount;
617 if (Kind == k_Register)
618 return Reg.ShiftExtend.Amount;
619 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 619)
;
620 }
621
622 bool hasShiftExtendAmount() const {
623 if (Kind == k_ShiftExtend)
624 return ShiftExtend.HasExplicitAmount;
625 if (Kind == k_Register)
626 return Reg.ShiftExtend.HasExplicitAmount;
627 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 627)
;
628 }
629
630 bool isImm() const override { return Kind == k_Immediate; }
631 bool isMem() const override { return false; }
632
633 bool isUImm6() const {
634 if (!isImm())
635 return false;
636 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
637 if (!MCE)
638 return false;
639 int64_t Val = MCE->getValue();
640 return (Val >= 0 && Val < 64);
641 }
642
  /// Signed immediate of \p Width bits, unscaled.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  /// Signed immediate of \p Bits bits that is a multiple of \p Scale.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  /// Unsigned immediate of \p Bits bits that is a multiple of \p Scale.
  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }
652
  /// Shared implementation for isSImmScaled/isUImmScaled: a constant immediate
  /// of \p Bits bits (signed or unsigned per \p Signed), scaled by \p Scale.
  /// Returns NoMatch for non-constants so other operand classes can still try,
  /// and NearMatch (for a better diagnostic) when a constant is out of range.
  template <int Bits, int Scale>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;

    // Representable range: [-2^(Bits-1), 2^(Bits-1)-1] * Scale when signed,
    // [0, 2^Bits - 1] * Scale when unsigned.
    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    int64_t Val = MCE->getValue();
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
678
  /// SVE predicate pattern immediate: a constant in [0, 31].
  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
690
  /// Can \p Expr be used as a symbolic 12-bit unsigned offset (the immediate
  /// field of LDR/STR), i.e. does it carry a :lo12:-style modifier that the
  /// fixup/relocation machinery will resolve?
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
725
  /// 12-bit unsigned, Scale-byte-scaled load/store offset: either a constant
  /// multiple of \p Scale with quotient in [0, 0xfff), or a symbolic
  /// expression handled by isSymbolicUImm12Offset.
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
737
  /// Constant immediate in the inclusive range [N, M].
  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }
748
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    // Accept the value if it is representable in T either sign- or
    // zero-extended; reject anything that doesn't round-trip through T.
    int64_t Val = MCE->getValue();
    int64_t SVal = std::make_signed_t<T>(Val);
    int64_t UVal = std::make_unsigned_t<T>(Val);
    if (Val != SVal && Val != UVal)
      return false;

    return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
  }
767
768 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
769
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already-shifted form: accept only if the written shift equals Width.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Plain constant: fold the shift out if the low Width bits are zero
    // (and the value is non-zero), otherwise report it unshifted.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
790
  /// Immediate usable by ADD/SUB: a symbolic reference with a pageoff/lo12
  /// style modifier (resolved by the fixup code), or a constant fitting in
  /// 12 bits after an optional 'lsl #12'.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
835
  /// Negative ADD/SUB immediate: a constant in [-0xfff, -1] (after optional
  /// 'lsl #12'); matched so the assembler can flip ADD<->SUB.
  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }
846
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // A byte element cannot take a 'lsl #8' form, hence the IsByte guard.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
866
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // A byte element cannot take a 'lsl #8' form, hence the IsByte guard.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          // NOTE(review): unlike isSVECpyImm, the shift here is on a signed
          // int64_t (no uint64_t cast) — presumably fine since values are
          // range-checked to be non-negative; confirm the asymmetry is intended.
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
883
  /// Logical immediate that is NOT also an SVE CPY immediate; used to prefer
  /// the CPY encoding when both would match.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
889
  bool isCondCode() const { return Kind == k_CondCode; }

  /// AdvSIMD modified-immediate "type 10" (the FMOV 64-bit pattern form).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
900
  /// Branch target with an N-bit signed, word-aligned displacement. Symbolic
  /// targets (non-constant expressions) are accepted and left to the fixup.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Targets must be 4-byte aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    // Signed range of N bits, scaled by the 4-byte instruction size.
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
914
915 bool
916 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
917 if (!isImm())
918 return false;
919
920 AArch64MCExpr::VariantKind ELFRefKind;
921 MCSymbolRefExpr::VariantKind DarwinRefKind;
922 int64_t Addend;
923 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
924 DarwinRefKind, Addend)) {
925 return false;
926 }
927 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
928 return false;
929
930 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
931 if (ELFRefKind == AllowedModifiers[i])
932 return true;
933 }
934
935 return false;
936 }
937
  // Per-halfword MOVZ/MOVK symbol modifiers: :abs_gN:/:prel_gN:/:tprel_gN:/
  // :dtprel_gN: (and _NC/_S variants) select bits [16N+15 : 16N].
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
967
  /// Constant expressible as a MOVZ alias (a single non-zero halfword at the
  /// given Shift) for a RegWidth-bit register.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
  }

  /// Constant expressible as a MOVN alias (all-ones except one halfword) for
  /// a RegWidth-bit register.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
989
  /// FP immediate encodable in the 8-bit FMOV immediate field.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
997
  // System-register operands keep separate encodings for MRS (read) and MSR
  // (write); -1U marks "not valid in this direction".
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE fields written by 'MSR <field>, #imm'. PAN/DIT/UAO/SSBS take a
  // 1-bit immediate; all other valid fields take a 4-bit immediate.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    // The 0-1 fields are deliberately excluded so the two classes are disjoint.
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
1021
  bool isReg() const override {
    return Kind == k_Register;
  }

  /// General-purpose or FP scalar register (not a vector).
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }
1033
  /// NEON vector register restricted to the lower half of the register file
  /// (V0-V15), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }
1041
  /// SVE register belonging to register class \p Class. The class determines
  /// whether a data (Z) or predicate (P) register kind is expected.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1061
  /// Scalar FP register used where a Z register is written (FP-as-ZPR alias).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1066
  /// SVE predicate register of class \p Class with the given element width
  /// suffix (e.g. p0.b vs p0.h).
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// SVE data (Z) register of class \p Class with the given element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1088
  /// SVE data register carrying an attached shift/extend of type
  /// \p ShiftExtendTy, scaled for \p ShiftWidth-bit elements.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1111
  /// 64-bit GPR written where a 32-bit operand is encoded (Xn-as-Wn alias).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  /// 32-bit GPR written where a 64-bit operand is encoded (Wn-as-Xn alias).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Even/odd register pairs used by CASP and similar paired instructions.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1133
  /// Rotation immediate for complex-number instructions: a multiple of
  /// \p Angle offset by \p Remainder, no greater than 270 degrees.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1146
  /// 64-bit GPR in register class \p RegClassID.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  /// 64-bit GPR with an attached 'lsl #log2(ExtWidth/8)' shift, as used by
  /// scaled register offsets.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1162
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  /// Vector list whose register count, kind, element count and element width
  /// all match the template parameters exactly.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }
1185
  /// Vector element index in the inclusive range [Min, Max].
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1194
  bool isToken() const override { return Kind == k_Token; }

  /// Token operand with exactly the spelling \p Str.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  /// A shifter operand proper: one of the shift (not extend) operators.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1214
  /// FP immediate that is bit-for-bit equal to the tabulated exact immediate
  /// identified by \p ImmEnum (e.g. 0.5, 1.0, 2.0 for FCMLE-style aliases).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1237
  /// Matches either of two exact FP immediates; reports the second attempt's
  /// diagnostic state if neither matches.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1247
  /// An extend operand (or LSL, which is a valid extend alias) with an
  /// amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1260
  /// Extend usable on a 64-bit instruction with a 32-bit source register.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }
1270
  /// Extend on a 64-bit source register: UXTX/SXTX or the LSL alias.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1279
  /// Register-offset extend valid for an X-register index of a Width-bit
  /// access: LSL/SXTX, with amount either 0 or log2 of the access size.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  /// Same as isMemXExtend but for a W-register index: UXTW/SXTW.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1297
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1320
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1344
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A halfword logical vector shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1364
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL ("masking shift left") by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1374
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1384
  /// ADRP target: constant offsets must be page-aligned and within the
  /// +/- 2^20-page range; symbolic targets are left to the fixup code.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  /// ADR target: constant offsets must fit in a signed 21-bit byte range.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1416
1417 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1418 // Add as immediates when possible. Null MCExpr = 0.
1419 if (!Expr)
1420 Inst.addOperand(MCOperand::createImm(0));
1421 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1422 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1423 else
1424 Inst.addOperand(MCOperand::createExpr(Expr));
1425 }
1426
  /// Append this operand's register to \p Inst. N is the operand count the
  /// matcher expects this class to contribute.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1431
  /// The user wrote Xn but the instruction encodes Wn: translate the 64-bit
  /// register to its 32-bit counterpart via the shared encoding value.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1443
  /// The user wrote Wn but the instruction encodes Xn: translate the 32-bit
  /// register to its 64-bit counterpart via the shared encoding value.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1455
  /// FP register written where a Z register is encoded: map Bn/Hn/Sn/Dn/Qn
  /// onto Zn by index (the scalar and SVE files are index-aligned).
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1470
  /// Vector register parsed as Qn but encoded as the 64-bit Dn view:
  /// translate by index (the D and Q files are index-aligned).
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1477
1478 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1479 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1479, __PRETTY_FUNCTION__))
;
1480 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1481, __PRETTY_FUNCTION__))
1481 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1481, __PRETTY_FUNCTION__))
;
1482 Inst.addOperand(MCOperand::createReg(getReg()));
1483 }
1484
1485 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1486 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1486, __PRETTY_FUNCTION__))
;
1487 Inst.addOperand(MCOperand::createReg(getReg()));
1488 }
1489
1490 enum VecListIndexType {
1491 VecListIdx_DReg = 0,
1492 VecListIdx_QReg = 1,
1493 VecListIdx_ZReg = 2,
1494 };
1495
1496 template <VecListIndexType RegTy, unsigned NumRegs>
1497 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1498 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1498, __PRETTY_FUNCTION__))
;
1499 static const unsigned FirstRegs[][5] = {
1500 /* DReg */ { AArch64::Q0,
1501 AArch64::D0, AArch64::D0_D1,
1502 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1503 /* QReg */ { AArch64::Q0,
1504 AArch64::Q0, AArch64::Q0_Q1,
1505 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1506 /* ZReg */ { AArch64::Z0,
1507 AArch64::Z0, AArch64::Z0_Z1,
1508 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1509 };
1510
1511 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1512, __PRETTY_FUNCTION__))
1512 " NumRegs must be <= 4 for ZRegs")(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1512, __PRETTY_FUNCTION__))
;
1513
1514 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1515 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1516 FirstRegs[(unsigned)RegTy][0]));
1517 }
1518
1519 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1520 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1520, __PRETTY_FUNCTION__))
;
1521 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1522 }
1523
1524 template <unsigned ImmIs0, unsigned ImmIs1>
1525 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1526 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1526, __PRETTY_FUNCTION__))
;
1527 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")((bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand"
) ? static_cast<void> (0) : __assert_fail ("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1527, __PRETTY_FUNCTION__))
;
1528 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1529 }
1530
1531 void addImmOperands(MCInst &Inst, unsigned N) const {
1532 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1532, __PRETTY_FUNCTION__))
;
1533 // If this is a pageoff symrefexpr with an addend, adjust the addend
1534 // to be only the page-offset portion. Otherwise, just add the expr
1535 // as-is.
1536 addExpr(Inst, getImm());
1537 }
1538
1539 template <int Shift>
1540 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1541 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1541, __PRETTY_FUNCTION__))
;
1542 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1543 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1544 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1545 } else if (isShiftedImm()) {
1546 addExpr(Inst, getShiftedImmVal());
1547 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1548 } else {
1549 addExpr(Inst, getImm());
1550 Inst.addOperand(MCOperand::createImm(0));
1551 }
1552 }
1553
1554 template <int Shift>
1555 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1556 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1556, __PRETTY_FUNCTION__))
;
1557 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1558 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1559 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1560 } else
1561 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1561)
;
1562 }
1563
1564 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1565 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1565, __PRETTY_FUNCTION__))
;
1566 Inst.addOperand(MCOperand::createImm(getCondCode()));
1567 }
1568
1569 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1570 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1570, __PRETTY_FUNCTION__))
;
1571 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1572 if (!MCE)
1573 addExpr(Inst, getImm());
1574 else
1575 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1576 }
1577
1578 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1579 addImmOperands(Inst, N);
1580 }
1581
1582 template<int Scale>
1583 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1584 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1584, __PRETTY_FUNCTION__))
;
1585 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1586
1587 if (!MCE) {
1588 Inst.addOperand(MCOperand::createExpr(getImm()));
1589 return;
1590 }
1591 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1592 }
1593
1594 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1595 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1595, __PRETTY_FUNCTION__))
;
1596 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1597 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1598 }
1599
1600 template <int Scale>
1601 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1602 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1602, __PRETTY_FUNCTION__))
;
1603 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1604 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1605 }
1606
1607 template <typename T>
1608 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1609 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1609, __PRETTY_FUNCTION__))
;
1610 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1611 std::make_unsigned_t<T> Val = MCE->getValue();
1612 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1613 Inst.addOperand(MCOperand::createImm(encoding));
1614 }
1615
1616 template <typename T>
1617 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1618 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1618, __PRETTY_FUNCTION__))
;
1619 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1620 std::make_unsigned_t<T> Val = ~MCE->getValue();
1621 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1622 Inst.addOperand(MCOperand::createImm(encoding));
1623 }
1624
1625 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1626 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1626, __PRETTY_FUNCTION__))
;
1627 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1628 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1629 Inst.addOperand(MCOperand::createImm(encoding));
1630 }
1631
1632 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1633 // Branch operands don't encode the low bits, so shift them off
1634 // here. If it's a label, however, just put it on directly as there's
1635 // not enough information now to do anything.
1636 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1636, __PRETTY_FUNCTION__))
;
1637 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1638 if (!MCE) {
1639 addExpr(Inst, getImm());
1640 return;
1641 }
1642 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1642, __PRETTY_FUNCTION__))
;
1643 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1644 }
1645
1646 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1647 // Branch operands don't encode the low bits, so shift them off
1648 // here. If it's a label, however, just put it on directly as there's
1649 // not enough information now to do anything.
1650 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1650, __PRETTY_FUNCTION__))
;
1651 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1652 if (!MCE) {
1653 addExpr(Inst, getImm());
1654 return;
1655 }
1656 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1656, __PRETTY_FUNCTION__))
;
1657 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1658 }
1659
1660 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1661 // Branch operands don't encode the low bits, so shift them off
1662 // here. If it's a label, however, just put it on directly as there's
1663 // not enough information now to do anything.
1664 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1664, __PRETTY_FUNCTION__))
;
1665 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1666 if (!MCE) {
1667 addExpr(Inst, getImm());
1668 return;
1669 }
1670 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1670, __PRETTY_FUNCTION__))
;
1671 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1672 }
1673
1674 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1675 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1675, __PRETTY_FUNCTION__))
;
1676 Inst.addOperand(MCOperand::createImm(
1677 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1678 }
1679
1680 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1681 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1681, __PRETTY_FUNCTION__))
;
1682 Inst.addOperand(MCOperand::createImm(getBarrier()));
1683 }
1684
1685 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1686 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1686, __PRETTY_FUNCTION__))
;
1687
1688 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1689 }
1690
1691 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1692 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1692, __PRETTY_FUNCTION__))
;
1693
1694 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1695 }
1696
1697 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1698 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1698, __PRETTY_FUNCTION__))
;
1699
1700 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1701 }
1702
1703 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1704 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1704, __PRETTY_FUNCTION__))
;
1705
1706 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1707 }
1708
1709 void addSysCROperands(MCInst &Inst, unsigned N) const {
1710 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1710, __PRETTY_FUNCTION__))
;
1711 Inst.addOperand(MCOperand::createImm(getSysCR()));
1712 }
1713
1714 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1715 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1715, __PRETTY_FUNCTION__))
;
1716 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1717 }
1718
1719 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1720 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1720, __PRETTY_FUNCTION__))
;
1721 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1722 }
1723
1724 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1725 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1725, __PRETTY_FUNCTION__))
;
1726 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1727 }
1728
1729 void addShifterOperands(MCInst &Inst, unsigned N) const {
1730 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1730, __PRETTY_FUNCTION__))
;
1731 unsigned Imm =
1732 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1733 Inst.addOperand(MCOperand::createImm(Imm));
1734 }
1735
1736 void addExtendOperands(MCInst &Inst, unsigned N) const {
1737 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1737, __PRETTY_FUNCTION__))
;
1738 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1739 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1740 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1741 Inst.addOperand(MCOperand::createImm(Imm));
1742 }
1743
1744 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1745 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1745, __PRETTY_FUNCTION__))
;
1746 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1747 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1748 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1749 Inst.addOperand(MCOperand::createImm(Imm));
1750 }
1751
1752 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1753 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1753, __PRETTY_FUNCTION__))
;
1754 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1755 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1756 Inst.addOperand(MCOperand::createImm(IsSigned));
1757 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1758 }
1759
1760 // For 8-bit load/store instructions with a register offset, both the
1761 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1762 // they're disambiguated by whether the shift was explicit or implicit rather
1763 // than its size.
1764 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1765 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1765, __PRETTY_FUNCTION__))
;
1766 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1767 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1768 Inst.addOperand(MCOperand::createImm(IsSigned));
1769 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1770 }
1771
1772 template<int Shift>
1773 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1774 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1774, __PRETTY_FUNCTION__))
;
1775
1776 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1777 uint64_t Value = CE->getValue();
1778 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1779 }
1780
1781 template<int Shift>
1782 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1783 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1783, __PRETTY_FUNCTION__))
;
1784
1785 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1786 uint64_t Value = CE->getValue();
1787 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1788 }
1789
1790 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1791 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1791, __PRETTY_FUNCTION__))
;
1792 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1793 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1794 }
1795
1796 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1797 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1797, __PRETTY_FUNCTION__))
;
1798 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1799 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1800 }
1801
1802 void print(raw_ostream &OS) const override;
1803
1804 static std::unique_ptr<AArch64Operand>
1805 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1806 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1807 Op->Tok.Data = Str.data();
1808 Op->Tok.Length = Str.size();
1809 Op->Tok.IsSuffix = IsSuffix;
1810 Op->StartLoc = S;
1811 Op->EndLoc = S;
1812 return Op;
1813 }
1814
1815 static std::unique_ptr<AArch64Operand>
1816 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1817 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1818 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1819 unsigned ShiftAmount = 0,
1820 unsigned HasExplicitAmount = false) {
1821 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1822 Op->Reg.RegNum = RegNum;
1823 Op->Reg.Kind = Kind;
1824 Op->Reg.ElementWidth = 0;
1825 Op->Reg.EqualityTy = EqTy;
1826 Op->Reg.ShiftExtend.Type = ExtTy;
1827 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1828 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1829 Op->StartLoc = S;
1830 Op->EndLoc = E;
1831 return Op;
1832 }
1833
1834 static std::unique_ptr<AArch64Operand>
1835 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1836 SMLoc S, SMLoc E, MCContext &Ctx,
1837 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1838 unsigned ShiftAmount = 0,
1839 unsigned HasExplicitAmount = false) {
1840 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1842, __PRETTY_FUNCTION__))
1841 Kind == RegKind::SVEPredicateVector) &&(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1842, __PRETTY_FUNCTION__))
1842 "Invalid vector kind")(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1842, __PRETTY_FUNCTION__))
;
1843 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1844 HasExplicitAmount);
1845 Op->Reg.ElementWidth = ElementWidth;
1846 return Op;
1847 }
1848
1849 static std::unique_ptr<AArch64Operand>
1850 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1851 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1852 MCContext &Ctx) {
1853 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1854 Op->VectorList.RegNum = RegNum;
1855 Op->VectorList.Count = Count;
1856 Op->VectorList.NumElements = NumElements;
1857 Op->VectorList.ElementWidth = ElementWidth;
1858 Op->VectorList.RegisterKind = RegisterKind;
1859 Op->StartLoc = S;
1860 Op->EndLoc = E;
1861 return Op;
1862 }
1863
1864 static std::unique_ptr<AArch64Operand>
1865 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1866 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1867 Op->VectorIndex.Val = Idx;
1868 Op->StartLoc = S;
1869 Op->EndLoc = E;
1870 return Op;
1871 }
1872
1873 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1874 SMLoc E, MCContext &Ctx) {
1875 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1876 Op->Imm.Val = Val;
1877 Op->StartLoc = S;
1878 Op->EndLoc = E;
1879 return Op;
1880 }
1881
1882 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1883 unsigned ShiftAmount,
1884 SMLoc S, SMLoc E,
1885 MCContext &Ctx) {
1886 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1887 Op->ShiftedImm .Val = Val;
1888 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1889 Op->StartLoc = S;
1890 Op->EndLoc = E;
1891 return Op;
1892 }
1893
1894 static std::unique_ptr<AArch64Operand>
1895 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1896 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
1897 Op->CondCode.Code = Code;
1898 Op->StartLoc = S;
1899 Op->EndLoc = E;
1900 return Op;
1901 }
1902
1903 static std::unique_ptr<AArch64Operand>
1904 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1905 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
1906 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1907 Op->FPImm.IsExact = IsExact;
1908 Op->StartLoc = S;
1909 Op->EndLoc = S;
1910 return Op;
1911 }
1912
1913 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1914 StringRef Str,
1915 SMLoc S,
1916 MCContext &Ctx) {
1917 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
1918 Op->Barrier.Val = Val;
1919 Op->Barrier.Data = Str.data();
1920 Op->Barrier.Length = Str.size();
1921 Op->StartLoc = S;
1922 Op->EndLoc = S;
1923 return Op;
1924 }
1925
1926 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1927 uint32_t MRSReg,
1928 uint32_t MSRReg,
1929 uint32_t PStateField,
1930 MCContext &Ctx) {
1931 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
1932 Op->SysReg.Data = Str.data();
1933 Op->SysReg.Length = Str.size();
1934 Op->SysReg.MRSReg = MRSReg;
1935 Op->SysReg.MSRReg = MSRReg;
1936 Op->SysReg.PStateField = PStateField;
1937 Op->StartLoc = S;
1938 Op->EndLoc = S;
1939 return Op;
1940 }
1941
1942 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1943 SMLoc E, MCContext &Ctx) {
1944 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
1945 Op->SysCRImm.Val = Val;
1946 Op->StartLoc = S;
1947 Op->EndLoc = E;
1948 return Op;
1949 }
1950
1951 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1952 StringRef Str,
1953 SMLoc S,
1954 MCContext &Ctx) {
1955 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
1956 Op->Prefetch.Val = Val;
1957 Op->Barrier.Data = Str.data();
1958 Op->Barrier.Length = Str.size();
1959 Op->StartLoc = S;
1960 Op->EndLoc = S;
1961 return Op;
1962 }
1963
1964 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1965 StringRef Str,
1966 SMLoc S,
1967 MCContext &Ctx) {
1968 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
1969 Op->PSBHint.Val = Val;
1970 Op->PSBHint.Data = Str.data();
1971 Op->PSBHint.Length = Str.size();
1972 Op->StartLoc = S;
1973 Op->EndLoc = S;
1974 return Op;
1975 }
1976
1977 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1978 StringRef Str,
1979 SMLoc S,
1980 MCContext &Ctx) {
1981 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
1982 Op->BTIHint.Val = Val << 1 | 32;
1983 Op->BTIHint.Data = Str.data();
1984 Op->BTIHint.Length = Str.size();
1985 Op->StartLoc = S;
1986 Op->EndLoc = S;
1987 return Op;
1988 }
1989
1990 static std::unique_ptr<AArch64Operand>
1991 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1992 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1993 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1994 Op->ShiftExtend.Type = ShOp;
1995 Op->ShiftExtend.Amount = Val;
1996 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1997 Op->StartLoc = S;
1998 Op->EndLoc = E;
1999 return Op;
2000 }
2001};
2002
2003} // end anonymous namespace.
2004
2005void AArch64Operand::print(raw_ostream &OS) const {
2006 switch (Kind) {
2007 case k_FPImm:
2008 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2009 if (!getFPImmIsExact())
2010 OS << " (inexact)";
2011 OS << ">";
2012 break;
2013 case k_Barrier: {
2014 StringRef Name = getBarrierName();
2015 if (!Name.empty())
2016 OS << "<barrier " << Name << ">";
2017 else
2018 OS << "<barrier invalid #" << getBarrier() << ">";
2019 break;
2020 }
2021 case k_Immediate:
2022 OS << *getImm();
2023 break;
2024 case k_ShiftedImm: {
2025 unsigned Shift = getShiftedImmShift();
2026 OS << "<shiftedimm ";
2027 OS << *getShiftedImmVal();
2028 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2029 break;
2030 }
2031 case k_CondCode:
2032 OS << "<condcode " << getCondCode() << ">";
2033 break;
2034 case k_VectorList: {
2035 OS << "<vectorlist ";
2036 unsigned Reg = getVectorListStart();
2037 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2038 OS << Reg + i << " ";
2039 OS << ">";
2040 break;
2041 }
2042 case k_VectorIndex:
2043 OS << "<vectorindex " << getVectorIndex() << ">";
2044 break;
2045 case k_SysReg:
2046 OS << "<sysreg: " << getSysReg() << '>';
2047 break;
2048 case k_Token:
2049 OS << "'" << getToken() << "'";
2050 break;
2051 case k_SysCR:
2052 OS << "c" << getSysCR();
2053 break;
2054 case k_Prefetch: {
2055 StringRef Name = getPrefetchName();
2056 if (!Name.empty())
2057 OS << "<prfop " << Name << ">";
2058 else
2059 OS << "<prfop invalid #" << getPrefetch() << ">";
2060 break;
2061 }
2062 case k_PSBHint:
2063 OS << getPSBHintName();
2064 break;
2065 case k_Register:
2066 OS << "<register " << getReg() << ">";
2067 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2068 break;
2069 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2070 case k_BTIHint:
2071 OS << getBTIHintName();
2072 break;
2073 case k_ShiftExtend:
2074 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2075 << getShiftExtendAmount();
2076 if (!hasShiftExtendAmount())
2077 OS << "<imp>";
2078 OS << '>';
2079 break;
2080 }
2081}
2082
2083/// @name Auto-generated Match Functions
2084/// {
2085
2086static unsigned MatchRegisterName(StringRef Name);
2087
2088/// }
2089
2090static unsigned MatchNeonVectorRegName(StringRef Name) {
2091 return StringSwitch<unsigned>(Name.lower())
2092 .Case("v0", AArch64::Q0)
2093 .Case("v1", AArch64::Q1)
2094 .Case("v2", AArch64::Q2)
2095 .Case("v3", AArch64::Q3)
2096 .Case("v4", AArch64::Q4)
2097 .Case("v5", AArch64::Q5)
2098 .Case("v6", AArch64::Q6)
2099 .Case("v7", AArch64::Q7)
2100 .Case("v8", AArch64::Q8)
2101 .Case("v9", AArch64::Q9)
2102 .Case("v10", AArch64::Q10)
2103 .Case("v11", AArch64::Q11)
2104 .Case("v12", AArch64::Q12)
2105 .Case("v13", AArch64::Q13)
2106 .Case("v14", AArch64::Q14)
2107 .Case("v15", AArch64::Q15)
2108 .Case("v16", AArch64::Q16)
2109 .Case("v17", AArch64::Q17)
2110 .Case("v18", AArch64::Q18)
2111 .Case("v19", AArch64::Q19)
2112 .Case("v20", AArch64::Q20)
2113 .Case("v21", AArch64::Q21)
2114 .Case("v22", AArch64::Q22)
2115 .Case("v23", AArch64::Q23)
2116 .Case("v24", AArch64::Q24)
2117 .Case("v25", AArch64::Q25)
2118 .Case("v26", AArch64::Q26)
2119 .Case("v27", AArch64::Q27)
2120 .Case("v28", AArch64::Q28)
2121 .Case("v29", AArch64::Q29)
2122 .Case("v30", AArch64::Q30)
2123 .Case("v31", AArch64::Q31)
2124 .Default(0);
2125}
2126
2127/// Returns an optional pair of (#elements, element-width) if Suffix
2128/// is a valid vector kind. Where the number of elements in a vector
2129/// or the vector width is implicit or explicitly unknown (but still a
2130/// valid suffix kind), 0 is used.
2131static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2132 RegKind VectorKind) {
2133 std::pair<int, int> Res = {-1, -1};
2134
2135 switch (VectorKind) {
2136 case RegKind::NeonVector:
2137 Res =
2138 StringSwitch<std::pair<int, int>>(Suffix.lower())
2139 .Case("", {0, 0})
2140 .Case(".1d", {1, 64})
2141 .Case(".1q", {1, 128})
2142 // '.2h' needed for fp16 scalar pairwise reductions
2143 .Case(".2h", {2, 16})
2144 .Case(".2s", {2, 32})
2145 .Case(".2d", {2, 64})
2146 // '.4b' is another special case for the ARMv8.2a dot product
2147 // operand
2148 .Case(".4b", {4, 8})
2149 .Case(".4h", {4, 16})
2150 .Case(".4s", {4, 32})
2151 .Case(".8b", {8, 8})
2152 .Case(".8h", {8, 16})
2153 .Case(".16b", {16, 8})
2154 // Accept the width neutral ones, too, for verbose syntax. If those
2155 // aren't used in the right places, the token operand won't match so
2156 // all will work out.
2157 .Case(".b", {0, 8})
2158 .Case(".h", {0, 16})
2159 .Case(".s", {0, 32})
2160 .Case(".d", {0, 64})
2161 .Default({-1, -1});
2162 break;
2163 case RegKind::SVEPredicateVector:
2164 case RegKind::SVEDataVector:
2165 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2166 .Case("", {0, 0})
2167 .Case(".b", {0, 8})
2168 .Case(".h", {0, 16})
2169 .Case(".s", {0, 32})
2170 .Case(".d", {0, 64})
2171 .Case(".q", {0, 128})
2172 .Default({-1, -1});
2173 break;
2174 default:
2175 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2175)
;
2176 }
2177
2178 if (Res == std::make_pair(-1, -1))
2179 return Optional<std::pair<int, int>>();
2180
2181 return Optional<std::pair<int, int>>(Res);
2182}
2183
2184static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2185 return parseVectorKind(Suffix, VectorKind).hasValue();
2186}
2187
2188static unsigned matchSVEDataVectorRegName(StringRef Name) {
2189 return StringSwitch<unsigned>(Name.lower())
2190 .Case("z0", AArch64::Z0)
2191 .Case("z1", AArch64::Z1)
2192 .Case("z2", AArch64::Z2)
2193 .Case("z3", AArch64::Z3)
2194 .Case("z4", AArch64::Z4)
2195 .Case("z5", AArch64::Z5)
2196 .Case("z6", AArch64::Z6)
2197 .Case("z7", AArch64::Z7)
2198 .Case("z8", AArch64::Z8)
2199 .Case("z9", AArch64::Z9)
2200 .Case("z10", AArch64::Z10)
2201 .Case("z11", AArch64::Z11)
2202 .Case("z12", AArch64::Z12)
2203 .Case("z13", AArch64::Z13)
2204 .Case("z14", AArch64::Z14)
2205 .Case("z15", AArch64::Z15)
2206 .Case("z16", AArch64::Z16)
2207 .Case("z17", AArch64::Z17)
2208 .Case("z18", AArch64::Z18)
2209 .Case("z19", AArch64::Z19)
2210 .Case("z20", AArch64::Z20)
2211 .Case("z21", AArch64::Z21)
2212 .Case("z22", AArch64::Z22)
2213 .Case("z23", AArch64::Z23)
2214 .Case("z24", AArch64::Z24)
2215 .Case("z25", AArch64::Z25)
2216 .Case("z26", AArch64::Z26)
2217 .Case("z27", AArch64::Z27)
2218 .Case("z28", AArch64::Z28)
2219 .Case("z29", AArch64::Z29)
2220 .Case("z30", AArch64::Z30)
2221 .Case("z31", AArch64::Z31)
2222 .Default(0);
2223}
2224
2225static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2226 return StringSwitch<unsigned>(Name.lower())
2227 .Case("p0", AArch64::P0)
2228 .Case("p1", AArch64::P1)
2229 .Case("p2", AArch64::P2)
2230 .Case("p3", AArch64::P3)
2231 .Case("p4", AArch64::P4)
2232 .Case("p5", AArch64::P5)
2233 .Case("p6", AArch64::P6)
2234 .Case("p7", AArch64::P7)
2235 .Case("p8", AArch64::P8)
2236 .Case("p9", AArch64::P9)
2237 .Case("p10", AArch64::P10)
2238 .Case("p11", AArch64::P11)
2239 .Case("p12", AArch64::P12)
2240 .Case("p13", AArch64::P13)
2241 .Case("p14", AArch64::P14)
2242 .Case("p15", AArch64::P15)
2243 .Default(0);
2244}
2245
2246bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2247 SMLoc &EndLoc) {
2248 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2249}
2250
2251OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2252 SMLoc &StartLoc,
2253 SMLoc &EndLoc) {
2254 StartLoc = getLoc();
2255 auto Res = tryParseScalarRegister(RegNo);
2256 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2257 return Res;
2258}
2259
2260// Matches a register name or register alias previously defined by '.req'
2261unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2262 RegKind Kind) {
2263 unsigned RegNum = 0;
2264 if ((RegNum = matchSVEDataVectorRegName(Name)))
2265 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2266
2267 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2268 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2269
2270 if ((RegNum = MatchNeonVectorRegName(Name)))
2271 return Kind == RegKind::NeonVector ? RegNum : 0;
2272
2273 // The parsed register must be of RegKind Scalar
2274 if ((RegNum = MatchRegisterName(Name)))
2275 return Kind == RegKind::Scalar ? RegNum : 0;
2276
2277 if (!RegNum) {
2278 // Handle a few common aliases of registers.
2279 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2280 .Case("fp", AArch64::FP)
2281 .Case("lr", AArch64::LR)
2282 .Case("x31", AArch64::XZR)
2283 .Case("w31", AArch64::WZR)
2284 .Default(0))
2285 return Kind == RegKind::Scalar ? RegNum : 0;
2286
2287 // Check for aliases registered via .req. Canonicalize to lower case.
2288 // That's more consistent since register names are case insensitive, and
2289 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2290 auto Entry = RegisterReqs.find(Name.lower());
2291 if (Entry == RegisterReqs.end())
2292 return 0;
2293
2294 // set RegNum if the match is the right kind of register
2295 if (Kind == Entry->getValue().first)
2296 RegNum = Entry->getValue().second;
2297 }
2298 return RegNum;
2299}
2300
2301/// tryParseScalarRegister - Try to parse a register name. The token must be an
2302/// Identifier when called, and if it is a register name the token is eaten and
2303/// the register is added to the operand list.
2304OperandMatchResultTy
2305AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2306 MCAsmParser &Parser = getParser();
2307 const AsmToken &Tok = Parser.getTok();
2308 if (Tok.isNot(AsmToken::Identifier))
2309 return MatchOperand_NoMatch;
2310
2311 std::string lowerCase = Tok.getString().lower();
2312 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2313 if (Reg == 0)
2314 return MatchOperand_NoMatch;
2315
2316 RegNum = Reg;
2317 Parser.Lex(); // Eat identifier token.
2318 return MatchOperand_Success;
2319}
2320
2321/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2322OperandMatchResultTy
2323AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2324 MCAsmParser &Parser = getParser();
2325 SMLoc S = getLoc();
2326
2327 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2328 Error(S, "Expected cN operand where 0 <= N <= 15");
2329 return MatchOperand_ParseFail;
2330 }
2331
2332 StringRef Tok = Parser.getTok().getIdentifier();
2333 if (Tok[0] != 'c' && Tok[0] != 'C') {
2334 Error(S, "Expected cN operand where 0 <= N <= 15");
2335 return MatchOperand_ParseFail;
2336 }
2337
2338 uint32_t CRNum;
2339 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2340 if (BadNum || CRNum > 15) {
2341 Error(S, "Expected cN operand where 0 <= N <= 15");
2342 return MatchOperand_ParseFail;
2343 }
2344
2345 Parser.Lex(); // Eat identifier token.
2346 Operands.push_back(
2347 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2348 return MatchOperand_Success;
2349}
2350
2351/// tryParsePrefetch - Try to parse a prefetch operand.
2352template <bool IsSVEPrefetch>
2353OperandMatchResultTy
2354AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2355 MCAsmParser &Parser = getParser();
2356 SMLoc S = getLoc();
2357 const AsmToken &Tok = Parser.getTok();
2358
2359 auto LookupByName = [](StringRef N) {
2360 if (IsSVEPrefetch) {
2361 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2362 return Optional<unsigned>(Res->Encoding);
2363 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2364 return Optional<unsigned>(Res->Encoding);
2365 return Optional<unsigned>();
2366 };
2367
2368 auto LookupByEncoding = [](unsigned E) {
2369 if (IsSVEPrefetch) {
2370 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2371 return Optional<StringRef>(Res->Name);
2372 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2373 return Optional<StringRef>(Res->Name);
2374 return Optional<StringRef>();
2375 };
2376 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2377
2378 // Either an identifier for named values or a 5-bit immediate.
2379 // Eat optional hash.
2380 if (parseOptionalToken(AsmToken::Hash) ||
2381 Tok.is(AsmToken::Integer)) {
2382 const MCExpr *ImmVal;
2383 if (getParser().parseExpression(ImmVal))
2384 return MatchOperand_ParseFail;
2385
2386 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2387 if (!MCE) {
2388 TokError("immediate value expected for prefetch operand");
2389 return MatchOperand_ParseFail;
2390 }
2391 unsigned prfop = MCE->getValue();
2392 if (prfop > MaxVal) {
2393 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2394 "] expected");
2395 return MatchOperand_ParseFail;
2396 }
2397
2398 auto PRFM = LookupByEncoding(MCE->getValue());
2399 Operands.push_back(AArch64Operand::CreatePrefetch(
2400 prfop, PRFM.getValueOr(""), S, getContext()));
2401 return MatchOperand_Success;
2402 }
2403
2404 if (Tok.isNot(AsmToken::Identifier)) {
2405 TokError("prefetch hint expected");
2406 return MatchOperand_ParseFail;
2407 }
2408
2409 auto PRFM = LookupByName(Tok.getString());
2410 if (!PRFM) {
2411 TokError("prefetch hint expected");
2412 return MatchOperand_ParseFail;
2413 }
2414
2415 Parser.Lex(); // Eat identifier token.
2416 Operands.push_back(AArch64Operand::CreatePrefetch(
2417 *PRFM, Tok.getString(), S, getContext()));
2418 return MatchOperand_Success;
2419}
2420
2421/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2422OperandMatchResultTy
2423AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2424 MCAsmParser &Parser = getParser();
2425 SMLoc S = getLoc();
2426 const AsmToken &Tok = Parser.getTok();
2427 if (Tok.isNot(AsmToken::Identifier)) {
2428 TokError("invalid operand for instruction");
2429 return MatchOperand_ParseFail;
2430 }
2431
2432 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2433 if (!PSB) {
2434 TokError("invalid operand for instruction");
2435 return MatchOperand_ParseFail;
2436 }
2437
2438 Parser.Lex(); // Eat identifier token.
2439 Operands.push_back(AArch64Operand::CreatePSBHint(
2440 PSB->Encoding, Tok.getString(), S, getContext()));
2441 return MatchOperand_Success;
2442}
2443
2444/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2445OperandMatchResultTy
2446AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2447 MCAsmParser &Parser = getParser();
2448 SMLoc S = getLoc();
2449 const AsmToken &Tok = Parser.getTok();
2450 if (Tok.isNot(AsmToken::Identifier)) {
2451 TokError("invalid operand for instruction");
2452 return MatchOperand_ParseFail;
2453 }
2454
2455 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2456 if (!BTI) {
2457 TokError("invalid operand for instruction");
2458 return MatchOperand_ParseFail;
2459 }
2460
2461 Parser.Lex(); // Eat identifier token.
2462 Operands.push_back(AArch64Operand::CreateBTIHint(
2463 BTI->Encoding, Tok.getString(), S, getContext()));
2464 return MatchOperand_Success;
2465}
2466
2467/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2468/// instruction.
2469OperandMatchResultTy
2470AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2471 MCAsmParser &Parser = getParser();
2472 SMLoc S = getLoc();
2473 const MCExpr *Expr = nullptr;
2474
2475 if (Parser.getTok().is(AsmToken::Hash)) {
2476 Parser.Lex(); // Eat hash token.
2477 }
2478
2479 if (parseSymbolicImmVal(Expr))
2480 return MatchOperand_ParseFail;
2481
2482 AArch64MCExpr::VariantKind ELFRefKind;
2483 MCSymbolRefExpr::VariantKind DarwinRefKind;
2484 int64_t Addend;
2485 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2486 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2487 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2488 // No modifier was specified at all; this is the syntax for an ELF basic
2489 // ADRP relocation (unfortunately).
2490 Expr =
2491 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2492 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2493 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2494 Addend != 0) {
2495 Error(S, "gotpage label reference not allowed an addend");
2496 return MatchOperand_ParseFail;
2497 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2498 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2499 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2500 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2501 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2502 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2503 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2504 // The operand must be an @page or @gotpage qualified symbolref.
2505 Error(S, "page or gotpage label reference expected");
2506 return MatchOperand_ParseFail;
2507 }
2508 }
2509
2510 // We have either a label reference possibly with addend or an immediate. The
2511 // addend is a raw value here. The linker will adjust it to only reference the
2512 // page.
2513 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2514 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2515
2516 return MatchOperand_Success;
2517}
2518
2519/// tryParseAdrLabel - Parse and validate a source label for the ADR
2520/// instruction.
2521OperandMatchResultTy
2522AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2523 SMLoc S = getLoc();
2524 const MCExpr *Expr = nullptr;
2525
2526 // Leave anything with a bracket to the default for SVE
2527 if (getParser().getTok().is(AsmToken::LBrac))
2528 return MatchOperand_NoMatch;
2529
2530 if (getParser().getTok().is(AsmToken::Hash))
2531 getParser().Lex(); // Eat hash token.
2532
2533 if (parseSymbolicImmVal(Expr))
2534 return MatchOperand_ParseFail;
2535
2536 AArch64MCExpr::VariantKind ELFRefKind;
2537 MCSymbolRefExpr::VariantKind DarwinRefKind;
2538 int64_t Addend;
2539 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2540 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2541 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2542 // No modifier was specified at all; this is the syntax for an ELF basic
2543 // ADR relocation (unfortunately).
2544 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2545 } else {
2546 Error(S, "unexpected adr label");
2547 return MatchOperand_ParseFail;
2548 }
2549 }
2550
2551 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2552 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2553 return MatchOperand_Success;
2554}
2555
2556/// tryParseFPImm - A floating point immediate expression operand.
2557template<bool AddFPZeroAsLiteral>
2558OperandMatchResultTy
2559AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2560 MCAsmParser &Parser = getParser();
2561 SMLoc S = getLoc();
2562
2563 bool Hash = parseOptionalToken(AsmToken::Hash);
2564
2565 // Handle negation, as that still comes through as a separate token.
2566 bool isNegative = parseOptionalToken(AsmToken::Minus);
2567
2568 const AsmToken &Tok = Parser.getTok();
2569 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2570 if (!Hash)
2571 return MatchOperand_NoMatch;
2572 TokError("invalid floating point immediate");
2573 return MatchOperand_ParseFail;
2574 }
2575
2576 // Parse hexadecimal representation.
2577 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2578 if (Tok.getIntVal() > 255 || isNegative) {
2579 TokError("encoded floating point value out of range");
2580 return MatchOperand_ParseFail;
2581 }
2582
2583 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2584 Operands.push_back(
2585 AArch64Operand::CreateFPImm(F, true, S, getContext()));
2586 } else {
2587 // Parse FP representation.
2588 APFloat RealVal(APFloat::IEEEdouble());
2589 auto StatusOrErr =
2590 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2591 if (errorToBool(StatusOrErr.takeError())) {
2592 TokError("invalid floating point representation");
2593 return MatchOperand_ParseFail;
2594 }
2595
2596 if (isNegative)
2597 RealVal.changeSign();
2598
2599 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2600 Operands.push_back(
2601 AArch64Operand::CreateToken("#0", false, S, getContext()));
2602 Operands.push_back(
2603 AArch64Operand::CreateToken(".0", false, S, getContext()));
2604 } else
2605 Operands.push_back(AArch64Operand::CreateFPImm(
2606 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2607 }
2608
2609 Parser.Lex(); // Eat the token.
2610
2611 return MatchOperand_Success;
2612}
2613
2614/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2615/// a shift suffix, for example '#1, lsl #12'.
2616OperandMatchResultTy
2617AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2618 MCAsmParser &Parser = getParser();
2619 SMLoc S = getLoc();
2620
2621 if (Parser.getTok().is(AsmToken::Hash))
2622 Parser.Lex(); // Eat '#'
2623 else if (Parser.getTok().isNot(AsmToken::Integer))
2624 // Operand should start from # or should be integer, emit error otherwise.
2625 return MatchOperand_NoMatch;
2626
2627 const MCExpr *Imm = nullptr;
2628 if (parseSymbolicImmVal(Imm))
2629 return MatchOperand_ParseFail;
2630 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2631 SMLoc E = Parser.getTok().getLoc();
2632 Operands.push_back(
2633 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2634 return MatchOperand_Success;
2635 }
2636
2637 // Eat ','
2638 Parser.Lex();
2639
2640 // The optional operand must be "lsl #N" where N is non-negative.
2641 if (!Parser.getTok().is(AsmToken::Identifier) ||
2642 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2643 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2644 return MatchOperand_ParseFail;
2645 }
2646
2647 // Eat 'lsl'
2648 Parser.Lex();
2649
2650 parseOptionalToken(AsmToken::Hash);
2651
2652 if (Parser.getTok().isNot(AsmToken::Integer)) {
2653 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2654 return MatchOperand_ParseFail;
2655 }
2656
2657 int64_t ShiftAmount = Parser.getTok().getIntVal();
2658
2659 if (ShiftAmount < 0) {
2660 Error(Parser.getTok().getLoc(), "positive shift amount required");
2661 return MatchOperand_ParseFail;
2662 }
2663 Parser.Lex(); // Eat the number
2664
2665 // Just in case the optional lsl #0 is used for immediates other than zero.
2666 if (ShiftAmount == 0 && Imm != nullptr) {
2667 SMLoc E = Parser.getTok().getLoc();
2668 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2669 return MatchOperand_Success;
2670 }
2671
2672 SMLoc E = Parser.getTok().getLoc();
2673 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2674 S, E, getContext()));
2675 return MatchOperand_Success;
2676}
2677
2678/// parseCondCodeString - Parse a Condition Code string.
2679AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2680 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2681 .Case("eq", AArch64CC::EQ)
2682 .Case("ne", AArch64CC::NE)
2683 .Case("cs", AArch64CC::HS)
2684 .Case("hs", AArch64CC::HS)
2685 .Case("cc", AArch64CC::LO)
2686 .Case("lo", AArch64CC::LO)
2687 .Case("mi", AArch64CC::MI)
2688 .Case("pl", AArch64CC::PL)
2689 .Case("vs", AArch64CC::VS)
2690 .Case("vc", AArch64CC::VC)
2691 .Case("hi", AArch64CC::HI)
2692 .Case("ls", AArch64CC::LS)
2693 .Case("ge", AArch64CC::GE)
2694 .Case("lt", AArch64CC::LT)
2695 .Case("gt", AArch64CC::GT)
2696 .Case("le", AArch64CC::LE)
2697 .Case("al", AArch64CC::AL)
2698 .Case("nv", AArch64CC::NV)
2699 .Default(AArch64CC::Invalid);
2700
2701 if (CC == AArch64CC::Invalid &&
2702 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2703 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2704 .Case("none", AArch64CC::EQ)
2705 .Case("any", AArch64CC::NE)
2706 .Case("nlast", AArch64CC::HS)
2707 .Case("last", AArch64CC::LO)
2708 .Case("first", AArch64CC::MI)
2709 .Case("nfrst", AArch64CC::PL)
2710 .Case("pmore", AArch64CC::HI)
2711 .Case("plast", AArch64CC::LS)
2712 .Case("tcont", AArch64CC::GE)
2713 .Case("tstop", AArch64CC::LT)
2714 .Default(AArch64CC::Invalid);
2715
2716 return CC;
2717}
2718
2719/// parseCondCode - Parse a Condition Code operand.
2720bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2721 bool invertCondCode) {
2722 MCAsmParser &Parser = getParser();
2723 SMLoc S = getLoc();
2724 const AsmToken &Tok = Parser.getTok();
2725 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")((Tok.is(AsmToken::Identifier) && "Token is not an Identifier"
) ? static_cast<void> (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2725, __PRETTY_FUNCTION__))
;
2726
2727 StringRef Cond = Tok.getString();
2728 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2729 if (CC == AArch64CC::Invalid)
2730 return TokError("invalid condition code");
2731 Parser.Lex(); // Eat identifier token.
2732
2733 if (invertCondCode) {
2734 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2735 return TokError("condition codes AL and NV are invalid for this instruction");
2736 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2737 }
2738
2739 Operands.push_back(
2740 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2741 return false;
2742}
2743
2744/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2745/// them if present.
2746OperandMatchResultTy
2747AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2748 MCAsmParser &Parser = getParser();
2749 const AsmToken &Tok = Parser.getTok();
2750 std::string LowerID = Tok.getString().lower();
2751 AArch64_AM::ShiftExtendType ShOp =
2752 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2753 .Case("lsl", AArch64_AM::LSL)
2754 .Case("lsr", AArch64_AM::LSR)
2755 .Case("asr", AArch64_AM::ASR)
2756 .Case("ror", AArch64_AM::ROR)
2757 .Case("msl", AArch64_AM::MSL)
2758 .Case("uxtb", AArch64_AM::UXTB)
2759 .Case("uxth", AArch64_AM::UXTH)
2760 .Case("uxtw", AArch64_AM::UXTW)
2761 .Case("uxtx", AArch64_AM::UXTX)
2762 .Case("sxtb", AArch64_AM::SXTB)
2763 .Case("sxth", AArch64_AM::SXTH)
2764 .Case("sxtw", AArch64_AM::SXTW)
2765 .Case("sxtx", AArch64_AM::SXTX)
2766 .Default(AArch64_AM::InvalidShiftExtend);
2767
2768 if (ShOp == AArch64_AM::InvalidShiftExtend)
2769 return MatchOperand_NoMatch;
2770
2771 SMLoc S = Tok.getLoc();
2772 Parser.Lex();
2773
2774 bool Hash = parseOptionalToken(AsmToken::Hash);
2775
2776 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2777 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2778 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2779 ShOp == AArch64_AM::MSL) {
2780 // We expect a number here.
2781 TokError("expected #imm after shift specifier");
2782 return MatchOperand_ParseFail;
2783 }
2784
2785 // "extend" type operations don't need an immediate, #0 is implicit.
2786 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2787 Operands.push_back(
2788 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2789 return MatchOperand_Success;
2790 }
2791
2792 // Make sure we do actually have a number, identifier or a parenthesized
2793 // expression.
2794 SMLoc E = Parser.getTok().getLoc();
2795 if (!Parser.getTok().is(AsmToken::Integer) &&
2796 !Parser.getTok().is(AsmToken::LParen) &&
2797 !Parser.getTok().is(AsmToken::Identifier)) {
2798 Error(E, "expected integer shift amount");
2799 return MatchOperand_ParseFail;
2800 }
2801
2802 const MCExpr *ImmVal;
2803 if (getParser().parseExpression(ImmVal))
2804 return MatchOperand_ParseFail;
2805
2806 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2807 if (!MCE) {
2808 Error(E, "expected constant '#imm' after shift specifier");
2809 return MatchOperand_ParseFail;
2810 }
2811
2812 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2813 Operands.push_back(AArch64Operand::CreateShiftExtend(
2814 ShOp, MCE->getValue(), true, S, E, getContext()));
2815 return MatchOperand_Success;
2816}
2817
// Table mapping an architecture-extension name (as written in directives such
// as ".arch_extension") to the subtarget feature bits that extension enables.
// Also consulted (in reverse) by setRequiredFeatureString to name a feature.
static const struct Extension {
  const char *Name;               // Extension name as spelled in assembly.
  const FeatureBitset Features;   // Feature bits this extension turns on.
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    // FIXME: Unsupported extensions. These are recognized by name but map to
    // no feature bits.
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2850
2851static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2852 if (FBS[AArch64::HasV8_1aOps])
2853 Str += "ARMv8.1a";
2854 else if (FBS[AArch64::HasV8_2aOps])
2855 Str += "ARMv8.2a";
2856 else if (FBS[AArch64::HasV8_3aOps])
2857 Str += "ARMv8.3a";
2858 else if (FBS[AArch64::HasV8_4aOps])
2859 Str += "ARMv8.4a";
2860 else if (FBS[AArch64::HasV8_5aOps])
2861 Str += "ARMv8.5a";
2862 else {
2863 auto ext = std::find_if(std::begin(ExtensionMap),
2864 std::end(ExtensionMap),
2865 [&](const Extension& e)
2866 // Use & in case multiple features are enabled
2867 { return (FBS & e.Features) != FeatureBitset(); }
2868 );
2869
2870 Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2871 }
2872}
2873
2874void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2875 SMLoc S) {
2876 const uint16_t Op2 = Encoding & 7;
2877 const uint16_t Cm = (Encoding & 0x78) >> 3;
2878 const uint16_t Cn = (Encoding & 0x780) >> 7;
2879 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2880
2881 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2882
2883 Operands.push_back(
2884 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2885 Operands.push_back(
2886 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2887 Operands.push_back(
2888 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2889 Expr = MCConstantExpr::create(Op2, getContext());
2890 Operands.push_back(
2891 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2892}
2893
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  // A '.' in the mnemonic would be a condition-code suffix, which none of
  // these aliases accept.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias handled here is emitted as a plain "sys" instruction.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Each branch below looks up the named operation in the corresponding
  // TableGen'erated table, diagnoses a missing subtarget feature with the
  // feature's name, and otherwise expands the packed encoding into SYS
  // operands via createSysAlias.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The PRCTX table encodes only op1/Cn/Cm; op2 distinguishes the three
    // prediction-restriction mnemonics.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Ops whose lowercase name contains "all" operate on everything and take no
  // register; all other ops require one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
2991
/// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either a
/// #imm in the range [0, 15] or a named barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // TSB accepts only the named option 'csync', never an immediate.
  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
    // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // The barrier field is 4 bits wide.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the symbolic name when the encoding has one; otherwise "".
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // At this point at least one of DB/TSB is non-null; prefer the DB encoding.
  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3048
/// Parse a system-register operand (for MRS/MSR), accepting both named
/// registers and the generic "Sop0_op1_Cn_Cm_op2" form.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // -1 marks the encoding invalid for that direction: MRSReg for reads,
  // MSRReg for writes. A named register may be readable, writeable, or both.
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Unknown name (or missing feature): try the generic encoded form.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  // The identifier may also name a PSTATE field (MSR immediate form).
  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
3077
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns false on success (operands pushed), true otherwise.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Resolve the (possibly empty) ".<kind>" suffix to an element width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // An optional lane index ("[n]") may follow; only a malformed index fails.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3110
3111OperandMatchResultTy
3112AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3113 SMLoc SIdx = getLoc();
3114 if (parseOptionalToken(AsmToken::LBrac)) {
3115 const MCExpr *ImmVal;
3116 if (getParser().parseExpression(ImmVal))
3117 return MatchOperand_NoMatch;
3118 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3119 if (!MCE) {
3120 TokError("immediate value expected for vector index");
3121 return MatchOperand_ParseFail;;
3122 }
3123
3124 SMLoc E = getLoc();
3125
3126 if (parseToken(AsmToken::RBrac, "']' expected"))
3127 return MatchOperand_ParseFail;;
3128
3129 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3130 E, getContext()));
3131 return MatchOperand_Success;
3132 }
3133
3134 return MatchOperand_NoMatch;
3135}
3136
3137// tryParseVectorRegister - Try to parse a vector register name with
3138// optional kind specifier. If it is a register specifier, eat the token
3139// and return it.
3140OperandMatchResultTy
3141AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3142 RegKind MatchKind) {
3143 MCAsmParser &Parser = getParser();
3144 const AsmToken &Tok = Parser.getTok();
3145
3146 if (Tok.isNot(AsmToken::Identifier))
3147 return MatchOperand_NoMatch;
3148
3149 StringRef Name = Tok.getString();
3150 // If there is a kind specifier, it's separated from the register name by
3151 // a '.'.
3152 size_t Start = 0, Next = Name.find('.');
3153 StringRef Head = Name.slice(Start, Next);
3154 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3155
3156 if (RegNum) {
3157 if (Next != StringRef::npos) {
3158 Kind = Name.slice(Next, StringRef::npos);
3159 if (!isValidVectorKind(Kind, MatchKind)) {
3160 TokError("invalid vector kind qualifier");
3161 return MatchOperand_ParseFail;
3162 }
3163 }
3164 Parser.Lex(); // Eat the register token.
3165
3166 Reg = RegNum;
3167 return MatchOperand_Success;
3168 }
3169
3170 return MatchOperand_NoMatch;
3171}
3172
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3225
3226/// parseRegister - Parse a register operand.
3227bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3228 // Try for a Neon vector register.
3229 if (!tryParseNeonVectorRegister(Operands))
3230 return false;
3231
3232 // Otherwise try for a scalar register.
3233 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3234 return false;
3235
3236 return true;
3237}
3238
/// Parse an immediate expression with an optional leading relocation
/// specifier of the form ":spec:" (e.g. ":lo12:sym"). On success the parsed
/// expression, wrapped in an AArch64MCExpr carrying the relocation kind when
/// one was given, is stored in \p ImmVal. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifiers are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    // The specifier must be closed by a second ':'.
    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation kind survives to encoding.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3316
/// Parse a curly-braced vector register list, either a range
/// "{ v0.4s - v3.4s }" or an explicit list "{ v0.4s, v1.4s, ... }".
/// When \p ExpectMatch is false, a non-list is not an error and the '{' is
/// pushed back so other list kinds (SVE vs. Neon) can be tried.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      // tryParseVectorRegister only succeeds with a kind it validated.
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{ vN - vM }".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Distance from first to last register, wrapping at 32.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Explicit list form: "{ vN, vN+1, ... }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3437
3438/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3439bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3440 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3441 if (ParseRes != MatchOperand_Success)
3442 return true;
3443
3444 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3445}
3446
/// Parse a GPR64sp operand that may carry an optional ", #0" suffix
/// (e.g. "sp, #0"); any other index value is rejected.
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No comma: plain register, no index.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // The '#' before the index is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getParser().getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  // Only the constant 0 is accepted as an index.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
3480
/// Parse a general-purpose register operand, optionally followed by a
/// shift/extend modifier when \p ParseShiftExtend is set. \p EqTy selects the
/// register-equality constraint recorded on the operand.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into the register operand itself rather
  // than pushing it as a separate operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3515
3516bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3517 MCAsmParser &Parser = getParser();
3518
3519 // Some SVE instructions have a decoration after the immediate, i.e.
3520 // "mul vl". We parse them here and add tokens, which must be present in the
3521 // asm string in the tablegen instruction.
3522 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3523 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3524 if (!Parser.getTok().getString().equals_lower("mul") ||
3525 !(NextIsVL || NextIsHash))
3526 return true;
3527
3528 Operands.push_back(
3529 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3530 Parser.Lex(); // Eat the "mul"
3531
3532 if (NextIsVL) {
3533 Operands.push_back(
3534 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3535 Parser.Lex(); // Eat the "vl"
3536 return false;
3537 }
3538
3539 if (NextIsHash) {
3540 Parser.Lex(); // Eat the #
3541 SMLoc S = getLoc();
3542
3543 // Parse immediate operand.
3544 const MCExpr *ImmVal;
3545 if (!Parser.parseExpression(ImmVal))
3546 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3547 Operands.push_back(AArch64Operand::CreateImm(
3548 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3549 getContext()));
3550 return MatchOperand_Success;
3551 }
3552 }
3553
3554 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3555}
3556
3557/// parseOperand - Parse a arm instruction operand. For now this parses the
3558/// operand regardless of the mnemonic.
3559bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3560 bool invertCondCode) {
3561 MCAsmParser &Parser = getParser();
3562
3563 OperandMatchResultTy ResTy =
3564 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3565
3566 // Check if the current operand has a custom associated parser, if so, try to
3567 // custom parse the operand, or fallback to the general approach.
3568 if (ResTy == MatchOperand_Success)
3569 return false;
3570 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3571 // there was a match, but an error occurred, in which case, just return that
3572 // the operand parsing failed.
3573 if (ResTy == MatchOperand_ParseFail)
3574 return true;
3575
3576 // Nothing custom, so do general case parsing.
3577 SMLoc S, E;
3578 switch (getLexer().getKind()) {
3579 default: {
3580 SMLoc S = getLoc();
3581 const MCExpr *Expr;
3582 if (parseSymbolicImmVal(Expr))
3583 return Error(S, "invalid operand");
3584
3585 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3586 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3587 return false;
3588 }
3589 case AsmToken::LBrac: {
3590 SMLoc Loc = Parser.getTok().getLoc();
3591 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3592 getContext()));
3593 Parser.Lex(); // Eat '['
3594
3595 // There's no comma after a '[', so we can parse the next operand
3596 // immediately.
3597 return parseOperand(Operands, false, false);
3598 }
3599 case AsmToken::LCurly:
3600 return parseNeonVectorList(Operands);
3601 case AsmToken::Identifier: {
3602 // If we're expecting a Condition Code operand, then just parse that.
3603 if (isCondCode)
3604 return parseCondCode(Operands, invertCondCode);
3605
3606 // If it's a register name, parse it.
3607 if (!parseRegister(Operands))
3608 return false;
3609
3610 // See if this is a "mul vl" decoration or "mul #<int>" operand used
3611 // by SVE instructions.
3612 if (!parseOptionalMulOperand(Operands))
3613 return false;
3614
3615 // This could be an optional "shift" or "extend" operand.
3616 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3617 // We can only continue if no tokens were eaten.
3618 if (GotShift != MatchOperand_NoMatch)
3619 return GotShift;
3620
3621 // This was not a register so parse other operands that start with an
3622 // identifier (like labels) as expressions and create them as immediates.
3623 const MCExpr *IdVal;
3624 S = getLoc();
3625 if (getParser().parseExpression(IdVal))
3626 return true;
3627 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3628 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3629 return false;
3630 }
3631 case AsmToken::Integer:
3632 case AsmToken::Real:
3633 case AsmToken::Hash: {
3634 // #42 -> immediate.
3635 S = getLoc();
3636
3637 parseOptionalToken(AsmToken::Hash);
3638
3639 // Parse a negative sign
3640 bool isNegative = false;
3641 if (Parser.getTok().is(AsmToken::Minus)) {
3642 isNegative = true;
3643 // We need to consume this token only when we have a Real, otherwise
3644 // we let parseSymbolicImmVal take care of it
3645 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3646 Parser.Lex();
3647 }
3648
3649 // The only Real that should come through here is a literal #0.0 for
3650 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3651 // so convert the value.
3652 const AsmToken &Tok = Parser.getTok();
3653 if (Tok.is(AsmToken::Real)) {
3654 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3655 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3656 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3657 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3658 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3659 return TokError("unexpected floating point literal");
3660 else if (IntVal != 0 || isNegative)
3661 return TokError("expected floating-point constant #0.0");
3662 Parser.Lex(); // Eat the token.
3663
3664 Operands.push_back(
3665 AArch64Operand::CreateToken("#0", false, S, getContext()));
3666 Operands.push_back(
3667 AArch64Operand::CreateToken(".0", false, S, getContext()));
3668 return false;
3669 }
3670
3671 const MCExpr *ImmVal;
3672 if (parseSymbolicImmVal(ImmVal))
3673 return true;
3674
3675 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3676 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3677 return false;
3678 }
3679 case AsmToken::Equal: {
3680 SMLoc Loc = getLoc();
3681 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3682 return TokError("unexpected token in operand");
3683 Parser.Lex(); // Eat '='
3684 const MCExpr *SubExprVal;
3685 if (getParser().parseExpression(SubExprVal))
3686 return true;
3687
3688 if (Operands.size() < 2 ||
3689 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3690 return Error(Loc, "Only valid when first operand is register");
3691
3692 bool IsXReg =
3693 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3694 Operands[1]->getReg());
3695
3696 MCContext& Ctx = getContext();
3697 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3698 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3699 if (isa<MCConstantExpr>(SubExprVal)) {
3700 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3701 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3702 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3703 ShiftAmt += 16;
3704 Imm >>= 16;
3705 }
3706 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3707 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3708 Operands.push_back(AArch64Operand::CreateImm(
3709 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3710 if (ShiftAmt)
3711 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3712 ShiftAmt, true, S, E, Ctx));
3713 return false;
3714 }
3715 APInt Simm = APInt(64, Imm << ShiftAmt);
3716 // check if the immediate is an unsigned or signed 32-bit int for W regs
3717 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3718 return Error(Loc, "Immediate too large for register");
3719 }
3720 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3721 const MCExpr *CPLoc =
3722 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3723 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3724 return false;
3725 }
3726 }
3727}
3728
3729bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3730 const MCParsedAsmOperand &Op2) const {
3731 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3732 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3733 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3734 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3735 return MCTargetAsmParser::regsEqual(Op1, Op2);
3736
3737 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3738, __PRETTY_FUNCTION__))
3738 "Testing equality of non-scalar registers not supported")((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3738, __PRETTY_FUNCTION__))
;
3739
3740 // Check if a registers match their sub/super register classes.
3741 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3742 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3743 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3744 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3745 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3746 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3747 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3748 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3749
3750 return false;
3751}
3752
3753/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3754/// operands.
3755bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3756 StringRef Name, SMLoc NameLoc,
3757 OperandVector &Operands) {
3758 MCAsmParser &Parser = getParser();
  // Canonicalize the legacy non-dotted conditional-branch spellings (e.g.
  // "beq") to the dotted "b.<cc>" form so the suffix handling below sees a
  // single uniform spelling.
3759 Name = StringSwitch<StringRef>(Name.lower())
3760 .Case("beq", "b.eq")
3761 .Case("bne", "b.ne")
3762 .Case("bhs", "b.hs")
3763 .Case("bcs", "b.cs")
3764 .Case("blo", "b.lo")
3765 .Case("bcc", "b.cc")
3766 .Case("bmi", "b.mi")
3767 .Case("bpl", "b.pl")
3768 .Case("bvs", "b.vs")
3769 .Case("bvc", "b.vc")
3770 .Case("bhi", "b.hi")
3771 .Case("bls", "b.ls")
3772 .Case("bge", "b.ge")
3773 .Case("blt", "b.lt")
3774 .Case("bgt", "b.gt")
3775 .Case("ble", "b.le")
3776 .Case("bal", "b.al")
3777 .Case("bnv", "b.nv")
3778 .Default(Name);
3779
3780 // First check for the AArch64-specific .req directive.
3781 if (Parser.getTok().is(AsmToken::Identifier) &&
3782 Parser.getTok().getIdentifier().lower() == ".req") {
3783 parseDirectiveReq(Name, NameLoc);
3784 // We always return 'error' for this, as we're done with this
3785 // statement and don't need to match the 'instruction."
3786 return true;
3787 }
3788
3789 // Create the leading tokens for the mnemonic, split by '.' characters.
3790 size_t Start = 0, Next = Name.find('.');
3791 StringRef Head = Name.slice(Start, Next);
3792
3793 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
3794 // the SYS instruction.
3795 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
3796 Head == "cfp" || Head == "dvp" || Head == "cpp")
3797 return parseSysAlias(Head, NameLoc, Operands);
3798
  // The mnemonic head becomes the first operand token; it is also remembered
  // in the Mnemonic member so operand parsing can special-case instructions.
3799 Operands.push_back(
3800 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3801 Mnemonic = Head;
3802
3803 // Handle condition codes for a branch mnemonic
3804 if (Head == "b" && Next != StringRef::npos) {
3805 Start = Next;
3806 Next = Name.find('.', Start + 1);
3807 Head = Name.slice(Start + 1, Next);
3808
3809 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3810 (Head.data() - Name.data()));
3811 AArch64CC::CondCode CC = parseCondCodeString(Head);
3812 if (CC == AArch64CC::Invalid)
3813 return Error(SuffixLoc, "invalid condition code");
  // Emit the "." as its own (suffix) token followed by the condition code
  // operand, mirroring how the matcher expects "b.<cc>" to be tokenized.
3814 Operands.push_back(
3815 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()))3816 Operands.push_back(
3817 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3818 }
3819
3820 // Add the remaining tokens in the mnemonic.
3821 while (Next != StringRef::npos) {
3822 Start = Next;
3823 Next = Name.find('.', Start + 1);
3824 Head = Name.slice(Start, Next);
3825 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3826 (Head.data() - Name.data()) + 1);
3827 Operands.push_back(
3828 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3829 }
3830
3831 // Conditional compare instructions have a Condition Code operand, which needs
3832 // to be parsed and an immediate operand created.
3833 bool condCodeFourthOperand =
3834 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3835 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3836 Head == "csinc" || Head == "csinv" || Head == "csneg");
3837
3838 // These instructions are aliases to some of the conditional select
3839 // instructions. However, the condition code is inverted in the aliased
3840 // instruction.
3841 //
3842 // FIXME: Is this the correct way to handle these? Or should the parser
3843 // generate the aliased instructions directly?
3844 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3845 bool condCodeThirdOperand =
3846 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3847
3848 // Read the remaining operands.
3849 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3850
  // N is the 1-based index of the operand being parsed; it drives the
  // condition-code position special cases computed above.
3851 unsigned N = 1;
3852 do {
3853 // Parse and remember the operand.
3854 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3855 (N == 3 && condCodeThirdOperand) ||
3856 (N == 2 && condCodeSecondOperand),
3857 condCodeSecondOperand || condCodeThirdOperand)) {
3858 return true;
3859 }
3860
3861 // After successfully parsing some operands there are two special cases to
3862 // consider (i.e. notional operands not separated by commas). Both are due
3863 // to memory specifiers:
3864 // + An RBrac will end an address for load/store/prefetch
3865 // + An '!' will indicate a pre-indexed operation.
3866 //
3867 // It's someone else's responsibility to make sure these tokens are sane
3868 // in the given context!
3869
3870 SMLoc RLoc = Parser.getTok().getLoc();
3871 if (parseOptionalToken(AsmToken::RBrac))
3872 Operands.push_back(
3873 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3874 SMLoc ELoc = Parser.getTok().getLoc();
3875 if (parseOptionalToken(AsmToken::Exclaim))
3876 Operands.push_back(
3877 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3878
3879 ++N;
3880 } while (parseOptionalToken(AsmToken::Comma));
3881 }
3882
3883 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3884 return true;
3885
3886 return false;
3887}
3888
3889static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3890 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31
)) ? static_cast<void> (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3890, __PRETTY_FUNCTION__))
;
3891 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3892 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3893 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3894 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3895 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3896 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3897}
3898
3899// FIXME: This entire function is a giant hack to provide us with decent
3900// operand range validation/diagnostics until TableGen/MC can be extended
3901// to support autogeneration of this kind of validation.
//
// Hand-written semantic checks the matcher cannot express: movprfx prefix
// rules, writeback-base / Rt==Rt2 hazards for loads and stores, and symbolic
// immediate restrictions on ADD/SUB. Returns true (after emitting a
// diagnostic) when the instruction is rejected.
3902 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3903 SmallVectorImpl<SMLoc> &Loc) {
  // NOTE(review): RI is dereferenced unconditionally below; this assumes
  // getContext().getRegisterInfo() never returns null here — confirm.
3904 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3905 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3906
3907 // A prefix only applies to the instruction following it. Here we extract
3908 // prefix information for the next instruction before validating the current
3909 // one so that in the case of failure we don't erronously continue using the
3910 // current prefix.
3911 PrefixInfo Prefix = NextPrefix;
3912 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3913
3914 // Before validating the instruction in isolation we run through the rules
3915 // applicable when it follows a prefix instruction.
3916 // NOTE: brk & hlt can be prefixed but require no additional validation.
3917 if (Prefix.isActive() &&
3918 (Inst.getOpcode() != AArch64::BRK) &&
3919 (Inst.getOpcode() != AArch64::HLT)) {
3920
3921 // Prefixed intructions must have a destructive operand.
3922 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3923 AArch64::NotDestructive)
3924 return Error(IDLoc, "instruction is unpredictable when following a"
3925 " movprfx, suggest replacing movprfx with mov");
3926
3927 // Destination operands must match.
3928 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3929 return Error(Loc[0], "instruction is unpredictable when following a"
3930 " movprfx writing to a different destination");
3931
3932 // Destination operand must not be used in any other location.
  // Tied operands are exempt: those are the destructive use of the
  // destination itself.
3933 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3934 if (Inst.getOperand(i).isReg() &&
3935 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3936 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3937 return Error(Loc[0], "instruction is unpredictable when following a"
3938 " movprfx and destination also used as non-destructive"
3939 " source");
3940 }
3941
3942 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3943 if (Prefix.isPredicated()) {
3944 int PgIdx = -1;
3945
3946 // Find the instructions general predicate.
3947 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3948 if (Inst.getOperand(i).isReg() &&
3949 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3950 PgIdx = i;
3951 break;
3952 }
3953
3954 // Instruction must be predicated if the movprfx is predicated.
3955 if (PgIdx == -1 ||
3956 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3957 return Error(IDLoc, "instruction is unpredictable when following a"
3958 " predicated movprfx, suggest using unpredicated movprfx");
3959
3960 // Instruction must use same general predicate as the movprfx.
3961 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3962 return Error(IDLoc, "instruction is unpredictable when following a"
3963 " predicated movprfx using a different general predicate");
3964
3965 // Instruction element type must match the movprfx.
3966 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3967 return Error(IDLoc, "instruction is unpredictable when following a"
3968 " predicated movprfx with a different element size");
3969 }
3970 }
3971
3972 // Check for indexed addressing modes w/ the base register being the
3973 // same as a destination/source register or pair load where
3974 // the Rt == Rt2. All of those are undefined behaviour.
3975 switch (Inst.getOpcode()) {
3976 case AArch64::LDPSWpre:
3977 case AArch64::LDPWpost:
3978 case AArch64::LDPWpre:
3979 case AArch64::LDPXpost:
3980 case AArch64::LDPXpre: {
  // Writeback forms: operand 0 is the writeback def, so Rt/Rt2/Rn start
  // at operand 1.
3981 unsigned Rt = Inst.getOperand(1).getReg();
3982 unsigned Rt2 = Inst.getOperand(2).getReg();
3983 unsigned Rn = Inst.getOperand(3).getReg();
3984 if (RI->isSubRegisterEq(Rn, Rt))
3985 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3986 "is also a destination");
3987 if (RI->isSubRegisterEq(Rn, Rt2))
3988 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3989 "is also a destination");
  // NOTE(review): the fallthrough re-runs the Rt==Rt2 check below using
  // operand indices 0/1, which for these writeback forms name the
  // writeback def and Rt rather than Rt/Rt2 — confirm this pairing is
  // intended.
3990 LLVM_FALLTHROUGH[[gnu::fallthrough]];
3991 }
3992 case AArch64::LDPDi:
3993 case AArch64::LDPQi:
3994 case AArch64::LDPSi:
3995 case AArch64::LDPSWi:
3996 case AArch64::LDPWi:
3997 case AArch64::LDPXi: {
3998 unsigned Rt = Inst.getOperand(0).getReg();
3999 unsigned Rt2 = Inst.getOperand(1).getReg();
4000 if (Rt == Rt2)
4001 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4002 break;
4003 }
4004 case AArch64::LDPDpost:
4005 case AArch64::LDPDpre:
4006 case AArch64::LDPQpost:
4007 case AArch64::LDPQpre:
4008 case AArch64::LDPSpost:
4009 case AArch64::LDPSpre:
4010 case AArch64::LDPSWpost: {
4011 unsigned Rt = Inst.getOperand(1).getReg();
4012 unsigned Rt2 = Inst.getOperand(2).getReg();
4013 if (Rt == Rt2)
4014 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4015 break;
4016 }
4017 case AArch64::STPDpost:
4018 case AArch64::STPDpre:
4019 case AArch64::STPQpost:
4020 case AArch64::STPQpre:
4021 case AArch64::STPSpost:
4022 case AArch64::STPSpre:
4023 case AArch64::STPWpost:
4024 case AArch64::STPWpre:
4025 case AArch64::STPXpost:
4026 case AArch64::STPXpre: {
4027 unsigned Rt = Inst.getOperand(1).getReg();
4028 unsigned Rt2 = Inst.getOperand(2).getReg();
4029 unsigned Rn = Inst.getOperand(3).getReg();
4030 if (RI->isSubRegisterEq(Rn, Rt))
4031 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4032 "is also a source");
4033 if (RI->isSubRegisterEq(Rn, Rt2))
4034 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4035 "is also a source");
4036 break;
4037 }
4038 case AArch64::LDRBBpre:
4039 case AArch64::LDRBpre:
4040 case AArch64::LDRHHpre:
4041 case AArch64::LDRHpre:
4042 case AArch64::LDRSBWpre:
4043 case AArch64::LDRSBXpre:
4044 case AArch64::LDRSHWpre:
4045 case AArch64::LDRSHXpre:
4046 case AArch64::LDRSWpre:
4047 case AArch64::LDRWpre:
4048 case AArch64::LDRXpre:
4049 case AArch64::LDRBBpost:
4050 case AArch64::LDRBpost:
4051 case AArch64::LDRHHpost:
4052 case AArch64::LDRHpost:
4053 case AArch64::LDRSBWpost:
4054 case AArch64::LDRSBXpost:
4055 case AArch64::LDRSHWpost:
4056 case AArch64::LDRSHXpost:
4057 case AArch64::LDRSWpost:
4058 case AArch64::LDRWpost:
4059 case AArch64::LDRXpost: {
4060 unsigned Rt = Inst.getOperand(1).getReg();
4061 unsigned Rn = Inst.getOperand(2).getReg();
4062 if (RI->isSubRegisterEq(Rn, Rt))
4063 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4064 "is also a source");
4065 break;
4066 }
4067 case AArch64::STRBBpost:
4068 case AArch64::STRBpost:
4069 case AArch64::STRHHpost:
4070 case AArch64::STRHpost:
4071 case AArch64::STRWpost:
4072 case AArch64::STRXpost:
4073 case AArch64::STRBBpre:
4074 case AArch64::STRBpre:
4075 case AArch64::STRHHpre:
4076 case AArch64::STRHpre:
4077 case AArch64::STRWpre:
4078 case AArch64::STRXpre: {
4079 unsigned Rt = Inst.getOperand(1).getReg();
4080 unsigned Rn = Inst.getOperand(2).getReg();
4081 if (RI->isSubRegisterEq(Rn, Rt))
4082 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4083 "is also a source");
4084 break;
4085 }
4086 case AArch64::STXRB:
4087 case AArch64::STXRH:
4088 case AArch64::STXRW:
4089 case AArch64::STXRX:
4090 case AArch64::STLXRB:
4091 case AArch64::STLXRH:
4092 case AArch64::STLXRW:
4093 case AArch64::STLXRX: {
  // The exclusive-store status register must not overlap the data register
  // or the (non-SP) base register.
4094 unsigned Rs = Inst.getOperand(0).getReg();
4095 unsigned Rt = Inst.getOperand(1).getReg();
4096 unsigned Rn = Inst.getOperand(2).getReg();
4097 if (RI->isSubRegisterEq(Rt, Rs) ||
4098 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4099 return Error(Loc[0],
4100 "unpredictable STXR instruction, status is also a source");
4101 break;
4102 }
4103 case AArch64::STXPW:
4104 case AArch64::STXPX:
4105 case AArch64::STLXPW:
4106 case AArch64::STLXPX: {
4107 unsigned Rs = Inst.getOperand(0).getReg();
4108 unsigned Rt1 = Inst.getOperand(1).getReg();
4109 unsigned Rt2 = Inst.getOperand(2).getReg();
4110 unsigned Rn = Inst.getOperand(3).getReg();
4111 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4112 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4113 return Error(Loc[0],
4114 "unpredictable STXP instruction, status is also a source");
4115 break;
4116 }
4117 case AArch64::LDRABwriteback:
4118 case AArch64::LDRAAwriteback: {
4119 unsigned Xt = Inst.getOperand(0).getReg();
4120 unsigned Xn = Inst.getOperand(1).getReg();
4121 if (Xt == Xn)
4122 return Error(Loc[0],
4123 "unpredictable LDRA instruction, writeback base"
4124 " is also a destination");
4125 break;
4126 }
4127 }
4128
4129
4130 // Now check immediate ranges. Separate from the above as there is overlap
4131 // in the instructions being checked and this keeps the nested conditionals
4132 // to a minimum.
4133 switch (Inst.getOpcode()) {
4134 case AArch64::ADDSWri:
4135 case AArch64::ADDSXri:
4136 case AArch64::ADDWri:
4137 case AArch64::ADDXri:
4138 case AArch64::SUBSWri:
4139 case AArch64::SUBSXri:
4140 case AArch64::SUBWri:
4141 case AArch64::SUBXri: {
4142 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4143 // some slight duplication here.
4144 if (Inst.getOperand(2).isExpr()) {
4145 const MCExpr *Expr = Inst.getOperand(2).getExpr();
4146 AArch64MCExpr::VariantKind ELFRefKind;
4147 MCSymbolRefExpr::VariantKind DarwinRefKind;
4148 int64_t Addend;
4149 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4150
4151 // Only allow these with ADDXri.
4152 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4153 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4154 Inst.getOpcode() == AArch64::ADDXri)
4155 return false;
4156
4157 // Only allow these with ADDXri/ADDWri
4158 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4159 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4160 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4161 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4162 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4163 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4164 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4165 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4166 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4167 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4168 (Inst.getOpcode() == AArch64::ADDXri ||
4169 Inst.getOpcode() == AArch64::ADDWri))
4170 return false;
4171
4172 // Don't allow symbol refs in the immediate field otherwise
4173 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4174 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4175 // 'cmp w0, 'borked')
4176 return Error(Loc.back(), "invalid immediate expression");
4177 }
4178 // We don't validate more complex expressions here
4179 }
4180 return false;
4181 }
4182 default:
4183 return false;
4184 }
4185}
4186
4187static std::string AArch64MnemonicSpellCheck(StringRef S,
4188 const FeatureBitset &FBS,
4189 unsigned VariantID = 0);
4190
4191bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4192 uint64_t ErrorInfo,
4193 OperandVector &Operands) {
4194 switch (ErrCode) {
4195 case Match_InvalidTiedOperand: {
4196 RegConstraintEqualityTy EqTy =
4197 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4198 .getRegEqualityTy();
4199 switch (EqTy) {
4200 case RegConstraintEqualityTy::EqualsSubReg:
4201 return Error(Loc, "operand must be 64-bit form of destination register");
4202 case RegConstraintEqualityTy::EqualsSuperReg:
4203 return Error(Loc, "operand must be 32-bit form of destination register");
4204 case RegConstraintEqualityTy::EqualsReg:
4205 return Error(Loc, "operand must match destination register");
4206 }
4207 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4207)
;
4208 }
4209 case Match_MissingFeature:
4210 return Error(Loc,
4211 "instruction requires a CPU feature not currently enabled");
4212 case Match_InvalidOperand:
4213 return Error(Loc, "invalid operand for instruction");
4214 case Match_InvalidSuffix:
4215 return Error(Loc, "invalid type suffix for instruction");
4216 case Match_InvalidCondCode:
4217 return Error(Loc, "expected AArch64 condition code");
4218 case Match_AddSubRegExtendSmall:
4219 return Error(Loc,
4220 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4221 case Match_AddSubRegExtendLarge:
4222 return Error(Loc,
4223 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4224 case Match_AddSubSecondSource:
4225 return Error(Loc,
4226 "expected compatible register, symbol or integer in range [0, 4095]");
4227 case Match_LogicalSecondSource:
4228 return Error(Loc, "expected compatible register or logical immediate");
4229 case Match_InvalidMovImm32Shift:
4230 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4231 case Match_InvalidMovImm64Shift:
4232 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4233 case Match_AddSubRegShift32:
4234 return Error(Loc,
4235 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4236 case Match_AddSubRegShift64:
4237 return Error(Loc,
4238 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4239 case Match_InvalidFPImm:
4240 return Error(Loc,
4241 "expected compatible register or floating-point constant");
4242 case Match_InvalidMemoryIndexedSImm6:
4243 return Error(Loc, "index must be an integer in range [-32, 31].");
4244 case Match_InvalidMemoryIndexedSImm5:
4245 return Error(Loc, "index must be an integer in range [-16, 15].");
4246 case Match_InvalidMemoryIndexed1SImm4:
4247 return Error(Loc, "index must be an integer in range [-8, 7].");
4248 case Match_InvalidMemoryIndexed2SImm4:
4249 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4250 case Match_InvalidMemoryIndexed3SImm4:
4251 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4252 case Match_InvalidMemoryIndexed4SImm4:
4253 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4254 case Match_InvalidMemoryIndexed16SImm4:
4255 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4256 case Match_InvalidMemoryIndexed1SImm6:
4257 return Error(Loc, "index must be an integer in range [-32, 31].");
4258 case Match_InvalidMemoryIndexedSImm8:
4259 return Error(Loc, "index must be an integer in range [-128, 127].");
4260 case Match_InvalidMemoryIndexedSImm9:
4261 return Error(Loc, "index must be an integer in range [-256, 255].");
4262 case Match_InvalidMemoryIndexed16SImm9:
4263 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4264 case Match_InvalidMemoryIndexed8SImm10:
4265 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4266 case Match_InvalidMemoryIndexed4SImm7:
4267 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4268 case Match_InvalidMemoryIndexed8SImm7:
4269 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4270 case Match_InvalidMemoryIndexed16SImm7:
4271 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4272 case Match_InvalidMemoryIndexed8UImm5:
4273 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4274 case Match_InvalidMemoryIndexed4UImm5:
4275 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4276 case Match_InvalidMemoryIndexed2UImm5:
4277 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4278 case Match_InvalidMemoryIndexed8UImm6:
4279 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4280 case Match_InvalidMemoryIndexed16UImm6:
4281 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4282 case Match_InvalidMemoryIndexed4UImm6:
4283 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4284 case Match_InvalidMemoryIndexed2UImm6:
4285 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4286 case Match_InvalidMemoryIndexed1UImm6:
4287 return Error(Loc, "index must be in range [0, 63].");
4288 case Match_InvalidMemoryWExtend8:
4289 return Error(Loc,
4290 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4291 case Match_InvalidMemoryWExtend16:
4292 return Error(Loc,
4293 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4294 case Match_InvalidMemoryWExtend32:
4295 return Error(Loc,
4296 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4297 case Match_InvalidMemoryWExtend64:
4298 return Error(Loc,
4299 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4300 case Match_InvalidMemoryWExtend128:
4301 return Error(Loc,
4302 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4303 case Match_InvalidMemoryXExtend8:
4304 return Error(Loc,
4305 "expected 'lsl' or 'sxtx' with optional shift of #0");
4306 case Match_InvalidMemoryXExtend16:
4307 return Error(Loc,
4308 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4309 case Match_InvalidMemoryXExtend32:
4310 return Error(Loc,
4311 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4312 case Match_InvalidMemoryXExtend64:
4313 return Error(Loc,
4314 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4315 case Match_InvalidMemoryXExtend128:
4316 return Error(Loc,
4317 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4318 case Match_InvalidMemoryIndexed1:
4319 return Error(Loc, "index must be an integer in range [0, 4095].");
4320 case Match_InvalidMemoryIndexed2:
4321 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4322 case Match_InvalidMemoryIndexed4:
4323 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4324 case Match_InvalidMemoryIndexed8:
4325 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4326 case Match_InvalidMemoryIndexed16:
4327 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4328 case Match_InvalidImm0_1:
4329 return Error(Loc, "immediate must be an integer in range [0, 1].");
4330 case Match_InvalidImm0_7:
4331 return Error(Loc, "immediate must be an integer in range [0, 7].");
4332 case Match_InvalidImm0_15:
4333 return Error(Loc, "immediate must be an integer in range [0, 15].");
4334 case Match_InvalidImm0_31:
4335 return Error(Loc, "immediate must be an integer in range [0, 31].");
4336 case Match_InvalidImm0_63:
4337 return Error(Loc, "immediate must be an integer in range [0, 63].");
4338 case Match_InvalidImm0_127:
4339 return Error(Loc, "immediate must be an integer in range [0, 127].");
4340 case Match_InvalidImm0_255:
4341 return Error(Loc, "immediate must be an integer in range [0, 255].");
4342 case Match_InvalidImm0_65535:
4343 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4344 case Match_InvalidImm1_8:
4345 return Error(Loc, "immediate must be an integer in range [1, 8].");
4346 case Match_InvalidImm1_16:
4347 return Error(Loc, "immediate must be an integer in range [1, 16].");
4348 case Match_InvalidImm1_32:
4349 return Error(Loc, "immediate must be an integer in range [1, 32].");
4350 case Match_InvalidImm1_64:
4351 return Error(Loc, "immediate must be an integer in range [1, 64].");
4352 case Match_InvalidSVEAddSubImm8:
4353 return Error(Loc, "immediate must be an integer in range [0, 255]"
4354 " with a shift amount of 0");
4355 case Match_InvalidSVEAddSubImm16:
4356 case Match_InvalidSVEAddSubImm32:
4357 case Match_InvalidSVEAddSubImm64:
4358 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4359 "multiple of 256 in range [256, 65280]");
4360 case Match_InvalidSVECpyImm8:
4361 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4362 " with a shift amount of 0");
4363 case Match_InvalidSVECpyImm16:
4364 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4365 "multiple of 256 in range [-32768, 65280]");
4366 case Match_InvalidSVECpyImm32:
4367 case Match_InvalidSVECpyImm64:
4368 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4369 "multiple of 256 in range [-32768, 32512]");
4370 case Match_InvalidIndexRange1_1:
4371 return Error(Loc, "expected lane specifier '[1]'");
4372 case Match_InvalidIndexRange0_15:
4373 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4374 case Match_InvalidIndexRange0_7:
4375 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4376 case Match_InvalidIndexRange0_3:
4377 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4378 case Match_InvalidIndexRange0_1:
4379 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4380 case Match_InvalidSVEIndexRange0_63:
4381 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4382 case Match_InvalidSVEIndexRange0_31:
4383 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4384 case Match_InvalidSVEIndexRange0_15:
4385 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4386 case Match_InvalidSVEIndexRange0_7:
4387 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4388 case Match_InvalidSVEIndexRange0_3:
4389 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4390 case Match_InvalidLabel:
4391 return Error(Loc, "expected label or encodable integer pc offset");
4392 case Match_MRS:
4393 return Error(Loc, "expected readable system register");
4394 case Match_MSR:
4395 return Error(Loc, "expected writable system register or pstate");
4396 case Match_InvalidComplexRotationEven:
4397 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4398 case Match_InvalidComplexRotationOdd:
4399 return Error(Loc, "complex rotation must be 90 or 270.");
4400 case Match_MnemonicFail: {
4401 std::string Suggestion = AArch64MnemonicSpellCheck(
4402 ((AArch64Operand &)*Operands[0]).getToken(),
4403 ComputeAvailableFeatures(STI->getFeatureBits()));
4404 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4405 }
4406 case Match_InvalidGPR64shifted8:
4407 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4408 case Match_InvalidGPR64shifted16:
4409 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4410 case Match_InvalidGPR64shifted32:
4411 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4412 case Match_InvalidGPR64shifted64:
4413 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4414 case Match_InvalidGPR64NoXZRshifted8:
4415 return Error(Loc, "register must be x0..x30 without shift");
4416 case Match_InvalidGPR64NoXZRshifted16:
4417 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4418 case Match_InvalidGPR64NoXZRshifted32:
4419 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4420 case Match_InvalidGPR64NoXZRshifted64:
4421 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4422 case Match_InvalidZPR32UXTW8:
4423 case Match_InvalidZPR32SXTW8:
4424 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4425 case Match_InvalidZPR32UXTW16:
4426 case Match_InvalidZPR32SXTW16:
4427 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4428 case Match_InvalidZPR32UXTW32:
4429 case Match_InvalidZPR32SXTW32:
4430 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4431 case Match_InvalidZPR32UXTW64:
4432 case Match_InvalidZPR32SXTW64:
4433 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4434 case Match_InvalidZPR64UXTW8:
4435 case Match_InvalidZPR64SXTW8:
4436 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4437 case Match_InvalidZPR64UXTW16:
4438 case Match_InvalidZPR64SXTW16:
4439 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4440 case Match_InvalidZPR64UXTW32:
4441 case Match_InvalidZPR64SXTW32:
4442 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4443 case Match_InvalidZPR64UXTW64:
4444 case Match_InvalidZPR64SXTW64:
4445 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4446 case Match_InvalidZPR32LSL8:
4447 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4448 case Match_InvalidZPR32LSL16:
4449 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4450 case Match_InvalidZPR32LSL32:
4451 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4452 case Match_InvalidZPR32LSL64:
4453 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4454 case Match_InvalidZPR64LSL8:
4455 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4456 case Match_InvalidZPR64LSL16:
4457 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4458 case Match_InvalidZPR64LSL32:
4459 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4460 case Match_InvalidZPR64LSL64:
4461 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4462 case Match_InvalidZPR0:
4463 return Error(Loc, "expected register without element width suffix");
4464 case Match_InvalidZPR8:
4465 case Match_InvalidZPR16:
4466 case Match_InvalidZPR32:
4467 case Match_InvalidZPR64:
4468 case Match_InvalidZPR128:
4469 return Error(Loc, "invalid element width");
4470 case Match_InvalidZPR_3b8:
4471 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4472 case Match_InvalidZPR_3b16:
4473 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4474 case Match_InvalidZPR_3b32:
4475 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4476 case Match_InvalidZPR_4b16:
4477 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4478 case Match_InvalidZPR_4b32:
4479 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4480 case Match_InvalidZPR_4b64:
4481 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4482 case Match_InvalidSVEPattern:
4483 return Error(Loc, "invalid predicate pattern");
4484 case Match_InvalidSVEPredicateAnyReg:
4485 case Match_InvalidSVEPredicateBReg:
4486 case Match_InvalidSVEPredicateHReg:
4487 case Match_InvalidSVEPredicateSReg:
4488 case Match_InvalidSVEPredicateDReg:
4489 return Error(Loc, "invalid predicate register.");
4490 case Match_InvalidSVEPredicate3bAnyReg:
4491 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
4492 case Match_InvalidSVEPredicate3bBReg:
4493 return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
4494 case Match_InvalidSVEPredicate3bHReg:
4495 return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
4496 case Match_InvalidSVEPredicate3bSReg:
4497 return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
4498 case Match_InvalidSVEPredicate3bDReg:
4499 return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
4500 case Match_InvalidSVEExactFPImmOperandHalfOne:
4501 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4502 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4503 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4504 case Match_InvalidSVEExactFPImmOperandZeroOne:
4505 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4506 default:
4507 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4507)
;
4508 }
4509}
4510
4511static const char *getSubtargetFeatureName(uint64_t Val);
4512
4513bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4514 OperandVector &Operands,
4515 MCStreamer &Out,
4516 uint64_t &ErrorInfo,
4517 bool MatchingInlineAsm) {
4518 assert(!Operands.empty() && "Unexpect empty operand list!")((!Operands.empty() && "Unexpect empty operand list!"
) ? static_cast<void> (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4518, __PRETTY_FUNCTION__))
;
4519 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4520 assert(Op.isToken() && "Leading operand should always be a mnemonic!")((Op.isToken() && "Leading operand should always be a mnemonic!"
) ? static_cast<void> (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4520, __PRETTY_FUNCTION__))
;
4521
4522 StringRef Tok = Op.getToken();
4523 unsigned NumOperands = Operands.size();
4524
4525 if (NumOperands == 4 && Tok == "lsl") {
4526 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4527 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4528 if (Op2.isScalarReg() && Op3.isImm()) {
4529 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4530 if (Op3CE) {
4531 uint64_t Op3Val = Op3CE->getValue();
4532 uint64_t NewOp3Val = 0;
4533 uint64_t NewOp4Val = 0;
4534 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4535 Op2.getReg())) {
4536 NewOp3Val = (32 - Op3Val) & 0x1f;
4537 NewOp4Val = 31 - Op3Val;
4538 } else {
4539 NewOp3Val = (64 - Op3Val) & 0x3f;
4540 NewOp4Val = 63 - Op3Val;
4541 }
4542
4543 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4544 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4545
4546 Operands[0] = AArch64Operand::CreateToken(
4547 "ubfm", false, Op.getStartLoc(), getContext());
4548 Operands.push_back(AArch64Operand::CreateImm(
4549 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4550 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4551 Op3.getEndLoc(), getContext());
4552 }
4553 }
4554 } else if (NumOperands == 4 && Tok == "bfc") {
4555 // FIXME: Horrible hack to handle BFC->BFM alias.
4556 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4557 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4558 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4559
4560 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4561 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4562 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4563
4564 if (LSBCE && WidthCE) {
4565 uint64_t LSB = LSBCE->getValue();
4566 uint64_t Width = WidthCE->getValue();
4567
4568 uint64_t RegWidth = 0;
4569 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4570 Op1.getReg()))
4571 RegWidth = 64;
4572 else
4573 RegWidth = 32;
4574
4575 if (LSB >= RegWidth)
4576 return Error(LSBOp.getStartLoc(),
4577 "expected integer in range [0, 31]");
4578 if (Width < 1 || Width > RegWidth)
4579 return Error(WidthOp.getStartLoc(),
4580 "expected integer in range [1, 32]");
4581
4582 uint64_t ImmR = 0;
4583 if (RegWidth == 32)
4584 ImmR = (32 - LSB) & 0x1f;
4585 else
4586 ImmR = (64 - LSB) & 0x3f;
4587
4588 uint64_t ImmS = Width - 1;
4589
4590 if (ImmR != 0 && ImmS >= ImmR)
4591 return Error(WidthOp.getStartLoc(),
4592 "requested insert overflows register");
4593
4594 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4595 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4596 Operands[0] = AArch64Operand::CreateToken(
4597 "bfm", false, Op.getStartLoc(), getContext());
4598 Operands[2] = AArch64Operand::CreateReg(
4599 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4600 SMLoc(), SMLoc(), getContext());
4601 Operands[3] = AArch64Operand::CreateImm(
4602 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4603 Operands.emplace_back(
4604 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4605 WidthOp.getEndLoc(), getContext()));
4606 }
4607 }
4608 } else if (NumOperands == 5) {
4609 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4610 // UBFIZ -> UBFM aliases.
4611 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4612 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4613 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4614 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4615
4616 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4617 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4618 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4619
4620 if (Op3CE && Op4CE) {
4621 uint64_t Op3Val = Op3CE->getValue();
4622 uint64_t Op4Val = Op4CE->getValue();
4623
4624 uint64_t RegWidth = 0;
4625 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4626 Op1.getReg()))
4627 RegWidth = 64;
4628 else
4629 RegWidth = 32;
4630
4631 if (Op3Val >= RegWidth)
4632 return Error(Op3.getStartLoc(),
4633 "expected integer in range [0, 31]");
4634 if (Op4Val < 1 || Op4Val > RegWidth)
4635 return Error(Op4.getStartLoc(),
4636 "expected integer in range [1, 32]");
4637
4638 uint64_t NewOp3Val = 0;
4639 if (RegWidth == 32)
4640 NewOp3Val = (32 - Op3Val) & 0x1f;
4641 else
4642 NewOp3Val = (64 - Op3Val) & 0x3f;
4643
4644 uint64_t NewOp4Val = Op4Val - 1;
4645
4646 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4647 return Error(Op4.getStartLoc(),
4648 "requested insert overflows register");
4649
4650 const MCExpr *NewOp3 =
4651 MCConstantExpr::create(NewOp3Val, getContext());
4652 const MCExpr *NewOp4 =
4653 MCConstantExpr::create(NewOp4Val, getContext());
4654 Operands[3] = AArch64Operand::CreateImm(
4655 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4656 Operands[4] = AArch64Operand::CreateImm(
4657 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4658 if (Tok == "bfi")
4659 Operands[0] = AArch64Operand::CreateToken(
4660 "bfm", false, Op.getStartLoc(), getContext());
4661 else if (Tok == "sbfiz")
4662 Operands[0] = AArch64Operand::CreateToken(
4663 "sbfm", false, Op.getStartLoc(), getContext());
4664 else if (Tok == "ubfiz")
4665 Operands[0] = AArch64Operand::CreateToken(
4666 "ubfm", false, Op.getStartLoc(), getContext());
4667 else
4668 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4668)
;
4669 }
4670 }
4671
4672 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4673 // UBFX -> UBFM aliases.
4674 } else if (NumOperands == 5 &&
4675 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4676 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4677 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4678 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4679
4680 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4681 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4682 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4683
4684 if (Op3CE && Op4CE) {
4685 uint64_t Op3Val = Op3CE->getValue();
4686 uint64_t Op4Val = Op4CE->getValue();
4687
4688 uint64_t RegWidth = 0;
4689 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4690 Op1.getReg()))
4691 RegWidth = 64;
4692 else
4693 RegWidth = 32;
4694
4695 if (Op3Val >= RegWidth)
4696 return Error(Op3.getStartLoc(),
4697 "expected integer in range [0, 31]");
4698 if (Op4Val < 1 || Op4Val > RegWidth)
4699 return Error(Op4.getStartLoc(),
4700 "expected integer in range [1, 32]");
4701
4702 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4703
4704 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4705 return Error(Op4.getStartLoc(),
4706 "requested extract overflows register");
4707
4708 const MCExpr *NewOp4 =
4709 MCConstantExpr::create(NewOp4Val, getContext());
4710 Operands[4] = AArch64Operand::CreateImm(
4711 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4712 if (Tok == "bfxil")
4713 Operands[0] = AArch64Operand::CreateToken(
4714 "bfm", false, Op.getStartLoc(), getContext());
4715 else if (Tok == "sbfx")
4716 Operands[0] = AArch64Operand::CreateToken(
4717 "sbfm", false, Op.getStartLoc(), getContext());
4718 else if (Tok == "ubfx")
4719 Operands[0] = AArch64Operand::CreateToken(
4720 "ubfm", false, Op.getStartLoc(), getContext());
4721 else
4722 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4722)
;
4723 }
4724 }
4725 }
4726 }
4727
4728 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4729 // instruction for FP registers correctly in some rare circumstances. Convert
4730 // it to a safe instruction and warn (because silently changing someone's
4731 // assembly is rude).
4732 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4733 NumOperands == 4 && Tok == "movi") {
4734 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4735 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4736 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4737 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4738 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4739 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4740 if (Suffix.lower() == ".2d" &&
4741 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4742 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4743 " correctly on this CPU, converting to equivalent movi.16b");
4744 // Switch the suffix to .16b.
4745 unsigned Idx = Op1.isToken() ? 1 : 2;
4746 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4747 getContext());
4748 }
4749 }
4750 }
4751
4752 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4753 // InstAlias can't quite handle this since the reg classes aren't
4754 // subclasses.
4755 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4756 // The source register can be Wn here, but the matcher expects a
4757 // GPR64. Twiddle it here if necessary.
4758 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4759 if (Op.isScalarReg()) {
4760 unsigned Reg = getXRegFromWReg(Op.getReg());
4761 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4762 Op.getStartLoc(), Op.getEndLoc(),
4763 getContext());
4764 }
4765 }
4766 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4767 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4768 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4769 if (Op.isScalarReg() &&
4770 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4771 Op.getReg())) {
4772 // The source register can be Wn here, but the matcher expects a
4773 // GPR64. Twiddle it here if necessary.
4774 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4775 if (Op.isScalarReg()) {
4776 unsigned Reg = getXRegFromWReg(Op.getReg());
4777 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4778 Op.getStartLoc(),
4779 Op.getEndLoc(), getContext());
4780 }
4781 }
4782 }
4783 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4784 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4785 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4786 if (Op.isScalarReg() &&
4787 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4788 Op.getReg())) {
4789 // The source register can be Wn here, but the matcher expects a
4790 // GPR32. Twiddle it here if necessary.
4791 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4792 if (Op.isScalarReg()) {
4793 unsigned Reg = getWRegFromXReg(Op.getReg());
4794 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4795 Op.getStartLoc(),
4796 Op.getEndLoc(), getContext());
4797 }
4798 }
4799 }
4800
4801 MCInst Inst;
4802 FeatureBitset MissingFeatures;
4803 // First try to match against the secondary set of tables containing the
4804 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4805 unsigned MatchResult =
4806 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4807 MatchingInlineAsm, 1);
4808
4809 // If that fails, try against the alternate table containing long-form NEON:
4810 // "fadd v0.2s, v1.2s, v2.2s"
4811 if (MatchResult != Match_Success) {
4812 // But first, save the short-form match result: we can use it in case the
4813 // long-form match also fails.
4814 auto ShortFormNEONErrorInfo = ErrorInfo;
4815 auto ShortFormNEONMatchResult = MatchResult;
4816 auto ShortFormNEONMissingFeatures = MissingFeatures;
4817
4818 MatchResult =
4819 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4820 MatchingInlineAsm, 0);
4821
4822 // Now, both matches failed, and the long-form match failed on the mnemonic
4823 // suffix token operand. The short-form match failure is probably more
4824 // relevant: use it instead.
4825 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4826 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4827 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4828 MatchResult = ShortFormNEONMatchResult;
4829 ErrorInfo = ShortFormNEONErrorInfo;
4830 MissingFeatures = ShortFormNEONMissingFeatures;
4831 }
4832 }
4833
4834 switch (MatchResult) {
4835 case Match_Success: {
4836 // Perform range checking and other semantic validations
4837 SmallVector<SMLoc, 8> OperandLocs;
4838 NumOperands = Operands.size();
4839 for (unsigned i = 1; i < NumOperands; ++i)
4840 OperandLocs.push_back(Operands[i]->getStartLoc());
4841 if (validateInstruction(Inst, IDLoc, OperandLocs))
4842 return true;
4843
4844 Inst.setLoc(IDLoc);
4845 Out.emitInstruction(Inst, getSTI());
4846 return false;
4847 }
4848 case Match_MissingFeature: {
4849 assert(MissingFeatures.any() && "Unknown missing feature!")((MissingFeatures.any() && "Unknown missing feature!"
) ? static_cast<void> (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4849, __PRETTY_FUNCTION__))
;
4850 // Special case the error message for the very common case where only
4851 // a single subtarget feature is missing (neon, e.g.).
4852 std::string Msg = "instruction requires:";
4853 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4854 if (MissingFeatures[i]) {
4855 Msg += " ";
4856 Msg += getSubtargetFeatureName(i);
4857 }
4858 }
4859 return Error(IDLoc, Msg);
4860 }
4861 case Match_MnemonicFail:
4862 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4863 case Match_InvalidOperand: {
4864 SMLoc ErrorLoc = IDLoc;
4865
4866 if (ErrorInfo != ~0ULL) {
4867 if (ErrorInfo >= Operands.size())
4868 return Error(IDLoc, "too few operands for instruction",
4869 SMRange(IDLoc, getTok().getLoc()));
4870
4871 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4872 if (ErrorLoc == SMLoc())
4873 ErrorLoc = IDLoc;
4874 }
4875 // If the match failed on a suffix token operand, tweak the diagnostic
4876 // accordingly.
4877 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4878 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4879 MatchResult = Match_InvalidSuffix;
4880
4881 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4882 }
4883 case Match_InvalidTiedOperand:
4884 case Match_InvalidMemoryIndexed1:
4885 case Match_InvalidMemoryIndexed2:
4886 case Match_InvalidMemoryIndexed4:
4887 case Match_InvalidMemoryIndexed8:
4888 case Match_InvalidMemoryIndexed16:
4889 case Match_InvalidCondCode:
4890 case Match_AddSubRegExtendSmall:
4891 case Match_AddSubRegExtendLarge:
4892 case Match_AddSubSecondSource:
4893 case Match_LogicalSecondSource:
4894 case Match_AddSubRegShift32:
4895 case Match_AddSubRegShift64:
4896 case Match_InvalidMovImm32Shift:
4897 case Match_InvalidMovImm64Shift:
4898 case Match_InvalidFPImm:
4899 case Match_InvalidMemoryWExtend8:
4900 case Match_InvalidMemoryWExtend16:
4901 case Match_InvalidMemoryWExtend32:
4902 case Match_InvalidMemoryWExtend64:
4903 case Match_InvalidMemoryWExtend128:
4904 case Match_InvalidMemoryXExtend8:
4905 case Match_InvalidMemoryXExtend16:
4906 case Match_InvalidMemoryXExtend32:
4907 case Match_InvalidMemoryXExtend64:
4908 case Match_InvalidMemoryXExtend128:
4909 case Match_InvalidMemoryIndexed1SImm4:
4910 case Match_InvalidMemoryIndexed2SImm4:
4911 case Match_InvalidMemoryIndexed3SImm4:
4912 case Match_InvalidMemoryIndexed4SImm4:
4913 case Match_InvalidMemoryIndexed1SImm6:
4914 case Match_InvalidMemoryIndexed16SImm4:
4915 case Match_InvalidMemoryIndexed4SImm7:
4916 case Match_InvalidMemoryIndexed8SImm7:
4917 case Match_InvalidMemoryIndexed16SImm7:
4918 case Match_InvalidMemoryIndexed8UImm5:
4919 case Match_InvalidMemoryIndexed4UImm5:
4920 case Match_InvalidMemoryIndexed2UImm5:
4921 case Match_InvalidMemoryIndexed1UImm6:
4922 case Match_InvalidMemoryIndexed2UImm6:
4923 case Match_InvalidMemoryIndexed4UImm6:
4924 case Match_InvalidMemoryIndexed8UImm6:
4925 case Match_InvalidMemoryIndexed16UImm6:
4926 case Match_InvalidMemoryIndexedSImm6:
4927 case Match_InvalidMemoryIndexedSImm5:
4928 case Match_InvalidMemoryIndexedSImm8:
4929 case Match_InvalidMemoryIndexedSImm9:
4930 case Match_InvalidMemoryIndexed16SImm9:
4931 case Match_InvalidMemoryIndexed8SImm10:
4932 case Match_InvalidImm0_1:
4933 case Match_InvalidImm0_7:
4934 case Match_InvalidImm0_15:
4935 case Match_InvalidImm0_31:
4936 case Match_InvalidImm0_63:
4937 case Match_InvalidImm0_127:
4938 case Match_InvalidImm0_255:
4939 case Match_InvalidImm0_65535:
4940 case Match_InvalidImm1_8:
4941 case Match_InvalidImm1_16:
4942 case Match_InvalidImm1_32:
4943 case Match_InvalidImm1_64:
4944 case Match_InvalidSVEAddSubImm8:
4945 case Match_InvalidSVEAddSubImm16:
4946 case Match_InvalidSVEAddSubImm32:
4947 case Match_InvalidSVEAddSubImm64:
4948 case Match_InvalidSVECpyImm8:
4949 case Match_InvalidSVECpyImm16:
4950 case Match_InvalidSVECpyImm32:
4951 case Match_InvalidSVECpyImm64:
4952 case Match_InvalidIndexRange1_1:
4953 case Match_InvalidIndexRange0_15:
4954 case Match_InvalidIndexRange0_7:
4955 case Match_InvalidIndexRange0_3:
4956 case Match_InvalidIndexRange0_1:
4957 case Match_InvalidSVEIndexRange0_63:
4958 case Match_InvalidSVEIndexRange0_31:
4959 case Match_InvalidSVEIndexRange0_15:
4960 case Match_InvalidSVEIndexRange0_7:
4961 case Match_InvalidSVEIndexRange0_3:
4962 case Match_InvalidLabel:
4963 case Match_InvalidComplexRotationEven:
4964 case Match_InvalidComplexRotationOdd:
4965 case Match_InvalidGPR64shifted8:
4966 case Match_InvalidGPR64shifted16:
4967 case Match_InvalidGPR64shifted32:
4968 case Match_InvalidGPR64shifted64:
4969 case Match_InvalidGPR64NoXZRshifted8:
4970 case Match_InvalidGPR64NoXZRshifted16:
4971 case Match_InvalidGPR64NoXZRshifted32:
4972 case Match_InvalidGPR64NoXZRshifted64:
4973 case Match_InvalidZPR32UXTW8:
4974 case Match_InvalidZPR32UXTW16:
4975 case Match_InvalidZPR32UXTW32:
4976 case Match_InvalidZPR32UXTW64:
4977 case Match_InvalidZPR32SXTW8:
4978 case Match_InvalidZPR32SXTW16:
4979 case Match_InvalidZPR32SXTW32:
4980 case Match_InvalidZPR32SXTW64:
4981 case Match_InvalidZPR64UXTW8:
4982 case Match_InvalidZPR64SXTW8:
4983 case Match_InvalidZPR64UXTW16:
4984 case Match_InvalidZPR64SXTW16:
4985 case Match_InvalidZPR64UXTW32:
4986 case Match_InvalidZPR64SXTW32:
4987 case Match_InvalidZPR64UXTW64:
4988 case Match_InvalidZPR64SXTW64:
4989 case Match_InvalidZPR32LSL8:
4990 case Match_InvalidZPR32LSL16:
4991 case Match_InvalidZPR32LSL32:
4992 case Match_InvalidZPR32LSL64:
4993 case Match_InvalidZPR64LSL8:
4994 case Match_InvalidZPR64LSL16:
4995 case Match_InvalidZPR64LSL32:
4996 case Match_InvalidZPR64LSL64:
4997 case Match_InvalidZPR0:
4998 case Match_InvalidZPR8:
4999 case Match_InvalidZPR16:
5000 case Match_InvalidZPR32:
5001 case Match_InvalidZPR64:
5002 case Match_InvalidZPR128:
5003 case Match_InvalidZPR_3b8:
5004 case Match_InvalidZPR_3b16:
5005 case Match_InvalidZPR_3b32:
5006 case Match_InvalidZPR_4b16:
5007 case Match_InvalidZPR_4b32:
5008 case Match_InvalidZPR_4b64:
5009 case Match_InvalidSVEPredicateAnyReg:
5010 case Match_InvalidSVEPattern:
5011 case Match_InvalidSVEPredicateBReg:
5012 case Match_InvalidSVEPredicateHReg:
5013 case Match_InvalidSVEPredicateSReg:
5014 case Match_InvalidSVEPredicateDReg:
5015 case Match_InvalidSVEPredicate3bAnyReg:
5016 case Match_InvalidSVEPredicate3bBReg:
5017 case Match_InvalidSVEPredicate3bHReg:
5018 case Match_InvalidSVEPredicate3bSReg:
5019 case Match_InvalidSVEPredicate3bDReg:
5020 case Match_InvalidSVEExactFPImmOperandHalfOne:
5021 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5022 case Match_InvalidSVEExactFPImmOperandZeroOne:
5023 case Match_MSR:
5024 case Match_MRS: {
5025 if (ErrorInfo >= Operands.size())
5026 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5027 // Any time we get here, there's nothing fancy to do. Just get the
5028 // operand SMLoc and display the diagnostic.
5029 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5030 if (ErrorLoc == SMLoc())
5031 ErrorLoc = IDLoc;
5032 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5033 }
5034 }
5035
5036 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5036)
;
5037}
5038
5039/// ParseDirective parses the arm specific directives
5040bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5041 const MCObjectFileInfo::Environment Format =
5042 getContext().getObjectFileInfo()->getObjectFileType();
5043 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5044
5045 auto IDVal = DirectiveID.getIdentifier().lower();
5046 SMLoc Loc = DirectiveID.getLoc();
5047 if (IDVal == ".arch")
5048 parseDirectiveArch(Loc);
5049 else if (IDVal == ".cpu")
5050 parseDirectiveCPU(Loc);
5051 else if (IDVal == ".tlsdesccall")
5052 parseDirectiveTLSDescCall(Loc);
5053 else if (IDVal == ".ltorg" || IDVal == ".pool")
5054 parseDirectiveLtorg(Loc);
5055 else if (IDVal == ".unreq")
5056 parseDirectiveUnreq(Loc);
5057 else if (IDVal == ".inst")
5058 parseDirectiveInst(Loc);
5059 else if (IDVal == ".cfi_negate_ra_state")
5060 parseDirectiveCFINegateRAState();
5061 else if (IDVal == ".cfi_b_key_frame")
5062 parseDirectiveCFIBKeyFrame();
5063 else if (IDVal == ".arch_extension")
5064 parseDirectiveArchExtension(Loc);
5065 else if (IsMachO) {
5066 if (IDVal == MCLOHDirectiveName())
5067 parseDirectiveLOH(IDVal, Loc);
5068 else
5069 return true;
5070 } else
5071 return true;
5072 return false;
5073}
5074
5075static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5076 SmallVector<StringRef, 4> &RequestedExtensions) {
5077 const bool NoCrypto =
5078 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5079 "nocrypto") != std::end(RequestedExtensions));
5080 const bool Crypto =
5081 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5082 "crypto") != std::end(RequestedExtensions));
5083
5084 if (!NoCrypto && Crypto) {
5085 switch (ArchKind) {
5086 default:
5087 // Map 'generic' (and others) to sha2 and aes, because
5088 // that was the traditional meaning of crypto.
5089 case AArch64::ArchKind::ARMV8_1A:
5090 case AArch64::ArchKind::ARMV8_2A:
5091 case AArch64::ArchKind::ARMV8_3A:
5092 RequestedExtensions.push_back("sha2");
5093 RequestedExtensions.push_back("aes");
5094 break;
5095 case AArch64::ArchKind::ARMV8_4A:
5096 case AArch64::ArchKind::ARMV8_5A:
5097 RequestedExtensions.push_back("sm4");
5098 RequestedExtensions.push_back("sha3");
5099 RequestedExtensions.push_back("sha2");
5100 RequestedExtensions.push_back("aes");
5101 break;
5102 }
5103 } else if (NoCrypto) {
5104 switch (ArchKind) {
5105 default:
5106 // Map 'generic' (and others) to sha2 and aes, because
5107 // that was the traditional meaning of crypto.
5108 case AArch64::ArchKind::ARMV8_1A:
5109 case AArch64::ArchKind::ARMV8_2A:
5110 case AArch64::ArchKind::ARMV8_3A:
5111 RequestedExtensions.push_back("nosha2");
5112 RequestedExtensions.push_back("noaes");
5113 break;
5114 case AArch64::ArchKind::ARMV8_4A:
5115 case AArch64::ArchKind::ARMV8_5A:
5116 RequestedExtensions.push_back("nosm4");
5117 RequestedExtensions.push_back("nosha3");
5118 RequestedExtensions.push_back("nosha2");
5119 RequestedExtensions.push_back("noaes");
5120 break;
5121 }
5122 }
5123}
5124
5125/// parseDirectiveArch
5126/// ::= .arch token
5127bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5128 SMLoc ArchLoc = getLoc();
5129
5130 StringRef Arch, ExtensionString;
5131 std::tie(Arch, ExtensionString) =
5132 getParser().parseStringToEndOfStatement().trim().split('+');
5133
5134 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5135 if (ID == AArch64::ArchKind::INVALID)
5136 return Error(ArchLoc, "unknown arch name");
5137
5138 if (parseToken(AsmToken::EndOfStatement))
5139 return true;
5140
5141 // Get the architecture and extension features.
5142 std::vector<StringRef> AArch64Features;
5143 AArch64::getArchFeatures(ID, AArch64Features);
5144 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5145 AArch64Features);
5146
5147 MCSubtargetInfo &STI = copySTI();
5148 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5149 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5150
5151 SmallVector<StringRef, 4> RequestedExtensions;
5152 if (!ExtensionString.empty())
5153 ExtensionString.split(RequestedExtensions, '+');
5154
5155 ExpandCryptoAEK(ID, RequestedExtensions);
5156
5157 FeatureBitset Features = STI.getFeatureBits();
5158 for (auto Name : RequestedExtensions) {
5159 bool EnableFeature = true;
5160
5161 if (Name.startswith_lower("no")) {
5162 EnableFeature = false;
5163 Name = Name.substr(2);
5164 }
5165
5166 for (const auto &Extension : ExtensionMap) {
5167 if (Extension.Name != Name)
5168 continue;
5169
5170 if (Extension.Features.none())
5171 report_fatal_error("unsupported architectural extension: " + Name);
5172
5173 FeatureBitset ToggleFeatures = EnableFeature
5174 ? (~Features & Extension.Features)
5175 : ( Features & Extension.Features);
5176 FeatureBitset Features =
5177 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5178 setAvailableFeatures(Features);
5179 break;
5180 }
5181 }
5182 return false;
5183}
5184
5185/// parseDirectiveArchExtension
5186/// ::= .arch_extension [no]feature
5187bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5188 SMLoc ExtLoc = getLoc();
5189
5190 StringRef Name = getParser().parseStringToEndOfStatement().trim();
5191
5192 if (parseToken(AsmToken::EndOfStatement,
5193 "unexpected token in '.arch_extension' directive"))
5194 return true;
5195
5196 bool EnableFeature = true;
5197 if (Name.startswith_lower("no")) {
5198 EnableFeature = false;
5199 Name = Name.substr(2);
5200 }
5201
5202 MCSubtargetInfo &STI = copySTI();
5203 FeatureBitset Features = STI.getFeatureBits();
5204 for (const auto &Extension : ExtensionMap) {
5205 if (Extension.Name != Name)
5206 continue;
5207
5208 if (Extension.Features.none())
5209 return Error(ExtLoc, "unsupported architectural extension: " + Name);
5210
5211 FeatureBitset ToggleFeatures = EnableFeature
5212 ? (~Features & Extension.Features)
5213 : (Features & Extension.Features);
5214 FeatureBitset Features =
5215 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5216 setAvailableFeatures(Features);
5217 return false;
5218 }
5219
5220 return Error(ExtLoc, "unknown architectural extension: " + Name);
5221}
5222
5223static SMLoc incrementLoc(SMLoc L, int Offset) {
5224 return SMLoc::getFromPointer(L.getPointer() + Offset);
5225}
5226
5227/// parseDirectiveCPU
5228/// ::= .cpu id
5229bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5230 SMLoc CurLoc = getLoc();
5231
5232 StringRef CPU, ExtensionString;
5233 std::tie(CPU, ExtensionString) =
5234 getParser().parseStringToEndOfStatement().trim().split('+');
5235
5236 if (parseToken(AsmToken::EndOfStatement))
5237 return true;
5238
5239 SmallVector<StringRef, 4> RequestedExtensions;
5240 if (!ExtensionString.empty())
5241 ExtensionString.split(RequestedExtensions, '+');
5242
5243 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5244 // once that is tablegen'ed
5245 if (!getSTI().isCPUStringValid(CPU)) {
5246 Error(CurLoc, "unknown CPU name");
5247 return false;
5248 }
5249
5250 MCSubtargetInfo &STI = copySTI();
5251 STI.setDefaultFeatures(CPU, "");
5252 CurLoc = incrementLoc(CurLoc, CPU.size());
5253
5254 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5255
5256 FeatureBitset Features = STI.getFeatureBits();
5257 for (auto Name : RequestedExtensions) {
5258 // Advance source location past '+'.
5259 CurLoc = incrementLoc(CurLoc, 1);
5260
5261 bool EnableFeature = true;
5262
5263 if (Name.startswith_lower("no")) {
5264 EnableFeature = false;
5265 Name = Name.substr(2);
5266 }
5267
5268 bool FoundExtension = false;
5269 for (const auto &Extension : ExtensionMap) {
5270 if (Extension.Name != Name)
5271 continue;
5272
5273 if (Extension.Features.none())
5274 report_fatal_error("unsupported architectural extension: " + Name);
5275
5276 FeatureBitset ToggleFeatures = EnableFeature
5277 ? (~Features & Extension.Features)
5278 : ( Features & Extension.Features);
5279 FeatureBitset Features =
5280 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5281 setAvailableFeatures(Features);
5282 FoundExtension = true;
5283
5284 break;
5285 }
5286
5287 if (!FoundExtension)
5288 Error(CurLoc, "unsupported architectural extension");
5289
5290 CurLoc = incrementLoc(CurLoc, Name.size());
5291 }
5292 return false;
5293}
5294
5295/// parseDirectiveInst
5296/// ::= .inst opcode [, ...]
5297bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5298 if (getLexer().is(AsmToken::EndOfStatement))
5299 return Error(Loc, "expected expression following '.inst' directive");
5300
5301 auto parseOp = [&]() -> bool {
5302 SMLoc L = getLoc();
5303 const MCExpr *Expr = nullptr;
5304 if (check(getParser().parseExpression(Expr), L, "expected expression"))
1
Assuming the condition is false
2
Taking false branch
5305 return true;
5306 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
3
Assuming null pointer is passed into cast
4
'Value' initialized to a null pointer value
5307 if (check(!Value, L, "expected constant expression"))
5
Assuming the condition is false
6
Taking false branch
5308 return true;
5309 getTargetStreamer().emitInst(Value->getValue());
7
Called C++ object pointer is null
5310 return false;
5311 };
5312
5313 if (parseMany(parseOp))
5314 return addErrorSuffix(" in '.inst' directive");
5315 return false;
5316}
5317
5318// parseDirectiveTLSDescCall:
5319// ::= .tlsdesccall symbol
5320bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5321 StringRef Name;
5322 if (check(getParser().parseIdentifier(Name), L,
5323 "expected symbol after directive") ||
5324 parseToken(AsmToken::EndOfStatement))
5325 return true;
5326
5327 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5328 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5329 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5330
5331 MCInst Inst;
5332 Inst.setOpcode(AArch64::TLSDESCCALL);
5333 Inst.addOperand(MCOperand::createExpr(Expr));
5334
5335 getParser().getStreamer().emitInstruction(Inst, getSTI());
5336 return false;
5337}
5338
5339/// ::= .loh <lohName | lohId> label1, ..., labelN
5340/// The number of arguments depends on the loh identifier.
5341bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5342 MCLOHType Kind;
5343 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5344 if (getParser().getTok().isNot(AsmToken::Integer))
5345 return TokError("expected an identifier or a number in directive");
5346 // We successfully get a numeric value for the identifier.
5347 // Check if it is valid.
5348 int64_t Id = getParser().getTok().getIntVal();
5349 if (Id <= -1U && !isValidMCLOHType(Id))
5350 return TokError("invalid numeric identifier in directive");
5351 Kind = (MCLOHType)Id;
5352 } else {
5353 StringRef Name = getTok().getIdentifier();
5354 // We successfully parse an identifier.
5355 // Check if it is a recognized one.
5356 int Id = MCLOHNameToId(Name);
5357
5358 if (Id == -1)
5359 return TokError("invalid identifier in directive");
5360 Kind = (MCLOHType)Id;
5361 }
5362 // Consume the identifier.
5363 Lex();
5364 // Get the number of arguments of this LOH.
5365 int NbArgs = MCLOHIdToNbArgs(Kind);
5366
5367 assert(NbArgs != -1 && "Invalid number of arguments")((NbArgs != -1 && "Invalid number of arguments") ? static_cast
<void> (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5367, __PRETTY_FUNCTION__))
;
5368
5369 SmallVector<MCSymbol *, 3> Args;
5370 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5371 StringRef Name;
5372 if (getParser().parseIdentifier(Name))
5373 return TokError("expected identifier in directive");
5374 Args.push_back(getContext().getOrCreateSymbol(Name));
5375
5376 if (Idx + 1 == NbArgs)
5377 break;
5378 if (parseToken(AsmToken::Comma,
5379 "unexpected token in '" + Twine(IDVal) + "' directive"))
5380 return true;
5381 }
5382 if (parseToken(AsmToken::EndOfStatement,
5383 "unexpected token in '" + Twine(IDVal) + "' directive"))
5384 return true;
5385
5386 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
5387 return false;
5388}
5389
5390/// parseDirectiveLtorg
5391/// ::= .ltorg | .pool
5392bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5393 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5394 return true;
5395 getTargetStreamer().emitCurrentConstantPool();
5396 return false;
5397}
5398
5399/// parseDirectiveReq
5400/// ::= name .req registername
5401bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5402 MCAsmParser &Parser = getParser();
5403 Parser.Lex(); // Eat the '.req' token.
5404 SMLoc SRegLoc = getLoc();
5405 RegKind RegisterKind = RegKind::Scalar;
5406 unsigned RegNum;
5407 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5408
5409 if (ParseRes != MatchOperand_Success) {
5410 StringRef Kind;
5411 RegisterKind = RegKind::NeonVector;
5412 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5413
5414 if (ParseRes == MatchOperand_ParseFail)
5415 return true;
5416
5417 if (ParseRes == MatchOperand_Success && !Kind.empty())
5418 return Error(SRegLoc, "vector register without type specifier expected");
5419 }
5420
5421 if (ParseRes != MatchOperand_Success) {
5422 StringRef Kind;
5423 RegisterKind = RegKind::SVEDataVector;
5424 ParseRes =
5425 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5426
5427 if (ParseRes == MatchOperand_ParseFail)
5428 return true;
5429
5430 if (ParseRes == MatchOperand_Success && !Kind.empty())
5431 return Error(SRegLoc,
5432 "sve vector register without type specifier expected");
5433 }
5434
5435 if (ParseRes != MatchOperand_Success) {
5436 StringRef Kind;
5437 RegisterKind = RegKind::SVEPredicateVector;
5438 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5439
5440 if (ParseRes == MatchOperand_ParseFail)
5441 return true;
5442
5443 if (ParseRes == MatchOperand_Success && !Kind.empty())
5444 return Error(SRegLoc,
5445 "sve predicate register without type specifier expected");
5446 }
5447
5448 if (ParseRes != MatchOperand_Success)
5449 return Error(SRegLoc, "register name or alias expected");
5450
5451 // Shouldn't be anything else.
5452 if (parseToken(AsmToken::EndOfStatement,
5453 "unexpected input in .req directive"))
5454 return true;
5455
5456 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5457 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5458 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5459
5460 return false;
5461}
5462
5463/// parseDirectiveUneq
5464/// ::= .unreq registername
5465bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5466 MCAsmParser &Parser = getParser();
5467 if (getTok().isNot(AsmToken::Identifier))
5468 return TokError("unexpected input in .unreq directive.");
5469 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5470 Parser.Lex(); // Eat the identifier.
5471 if (parseToken(AsmToken::EndOfStatement))
5472 return addErrorSuffix("in '.unreq' directive");
5473 return false;
5474}
5475
5476bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5477 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5478 return true;
5479 getStreamer().emitCFINegateRAState();
5480 return false;
5481}
5482
5483/// parseDirectiveCFIBKeyFrame
5484/// ::= .cfi_b_key
5485bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5486 if (parseToken(AsmToken::EndOfStatement,
5487 "unexpected token in '.cfi_b_key_frame'"))
5488 return true;
5489 getStreamer().emitCFIBKeyFrame();
5490 return false;
5491}
5492
5493bool
5494AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5495 AArch64MCExpr::VariantKind &ELFRefKind,
5496 MCSymbolRefExpr::VariantKind &DarwinRefKind,
5497 int64_t &Addend) {
5498 ELFRefKind = AArch64MCExpr::VK_INVALID;
5499 DarwinRefKind = MCSymbolRefExpr::VK_None;
5500 Addend = 0;
5501
5502 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5503 ELFRefKind = AE->getKind();
5504 Expr = AE->getSubExpr();
5505 }
5506
5507 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5508 if (SE) {
5509 // It's a simple symbol reference with no addend.
5510 DarwinRefKind = SE->getKind();
5511 return true;
5512 }
5513
5514 // Check that it looks like a symbol + an addend
5515 MCValue Res;
5516 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
5517 if (!Relocatable || Res.getSymB())
5518 return false;
5519
5520 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
5521 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
5522 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
5523 return false;
5524
5525 if (Res.getSymA())
5526 DarwinRefKind = Res.getSymA()->getKind();
5527 Addend = Res.getConstant();
5528
5529 // It's some symbol reference + a constant addend, but really
5530 // shouldn't use both Darwin and ELF syntax.
5531 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5532 DarwinRefKind == MCSymbolRefExpr::VK_None;
5533}
5534
5535/// Force static initialization.
5536extern "C" LLVM_EXTERNAL_VISIBILITY__attribute__ ((visibility("default"))) void LLVMInitializeAArch64AsmParser() {
5537 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5538 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5539 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5540 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
5541 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
5542}
5543
5544#define GET_REGISTER_MATCHER
5545#define GET_SUBTARGET_FEATURE_NAME
5546#define GET_MATCHER_IMPLEMENTATION
5547#define GET_MNEMONIC_SPELL_CHECKER
5548#include "AArch64GenAsmMatcher.inc"
5549
5550// Define this matcher function after the auto-generated include so we
5551// have the match class enum definitions.
5552unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5553 unsigned Kind) {
5554 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5555 // If the kind is a token for a literal immediate, check if our asm
5556 // operand matches. This is for InstAliases which have a fixed-value
5557 // immediate in the syntax.
5558 int64_t ExpectedVal;
5559 switch (Kind) {
5560 default:
5561 return Match_InvalidOperand;
5562 case MCK__HASH_0:
5563 ExpectedVal = 0;
5564 break;
5565 case MCK__HASH_1:
5566 ExpectedVal = 1;
5567 break;
5568 case MCK__HASH_12:
5569 ExpectedVal = 12;
5570 break;
5571 case MCK__HASH_16:
5572 ExpectedVal = 16;
5573 break;
5574 case MCK__HASH_2:
5575 ExpectedVal = 2;
5576 break;
5577 case MCK__HASH_24:
5578 ExpectedVal = 24;
5579 break;
5580 case MCK__HASH_3:
5581 ExpectedVal = 3;
5582 break;
5583 case MCK__HASH_32:
5584 ExpectedVal = 32;
5585 break;
5586 case MCK__HASH_4:
5587 ExpectedVal = 4;
5588 break;
5589 case MCK__HASH_48:
5590 ExpectedVal = 48;
5591 break;
5592 case MCK__HASH_6:
5593 ExpectedVal = 6;
5594 break;
5595 case MCK__HASH_64:
5596 ExpectedVal = 64;
5597 break;
5598 case MCK__HASH_8:
5599 ExpectedVal = 8;
5600 break;
5601 }
5602 if (!Op.isImm())
5603 return Match_InvalidOperand;
5604 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5605 if (!CE)
5606 return Match_InvalidOperand;
5607 if (CE->getValue() == ExpectedVal)
5608 return Match_Success;
5609 return Match_InvalidOperand;
5610}
5611
5612OperandMatchResultTy
5613AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5614
5615 SMLoc S = getLoc();
5616
5617 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5618 Error(S, "expected register");
5619 return MatchOperand_ParseFail;
5620 }
5621
5622 unsigned FirstReg;
5623 OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5624 if (Res != MatchOperand_Success)
5625 return MatchOperand_ParseFail;
5626
5627 const MCRegisterClass &WRegClass =
5628 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5629 const MCRegisterClass &XRegClass =
5630 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5631
5632 bool isXReg = XRegClass.contains(FirstReg),
5633 isWReg = WRegClass.contains(FirstReg);
5634 if (!isXReg && !isWReg) {
5635 Error(S, "expected first even register of a "
5636 "consecutive same-size even/odd register pair");
5637 return MatchOperand_ParseFail;
5638 }
5639
5640 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5641 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5642
5643 if (FirstEncoding & 0x1) {
5644 Error(S, "expected first even register of a "
5645 "consecutive same-size even/odd register pair");
5646 return MatchOperand_ParseFail;
5647 }
5648
5649 if (getParser().getTok().isNot(AsmToken::Comma)) {
5650 Error(getLoc(), "expected comma");
5651 return MatchOperand_ParseFail;
5652 }
5653 // Eat the comma
5654 getParser().Lex();
5655
5656 SMLoc E = getLoc();
5657 unsigned SecondReg;
5658 Res = tryParseScalarRegister(SecondReg);
5659 if (Res != MatchOperand_Success)
5660 return MatchOperand_ParseFail;
5661
5662 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5663 (isXReg && !XRegClass.contains(SecondReg)) ||
5664 (isWReg && !WRegClass.contains(SecondReg))) {
5665 Error(E,"expected second odd register of a "
5666 "consecutive same-size even/odd register pair");
5667 return MatchOperand_ParseFail;
5668 }
5669
5670 unsigned Pair = 0;
5671 if (isXReg) {
5672 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5673 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5674 } else {
5675 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5676 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5677 }
5678
5679 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5680 getLoc(), getContext()));
5681
5682 return MatchOperand_Success;
5683}
5684
5685template <bool ParseShiftExtend, bool ParseSuffix>
5686OperandMatchResultTy
5687AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5688 const SMLoc S = getLoc();
5689 // Check for a SVE vector register specifier first.
5690 unsigned RegNum;
5691 StringRef Kind;
5692
5693 OperandMatchResultTy Res =
5694 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5695
5696 if (Res != MatchOperand_Success)
5697 return Res;
5698
5699 if (ParseSuffix && Kind.empty())
5700 return MatchOperand_NoMatch;
5701
5702 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5703 if (!KindRes)
5704 return MatchOperand_NoMatch;
5705
5706 unsigned ElementWidth = KindRes->second;
5707
5708 // No shift/extend is the default.
5709 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5710 Operands.push_back(AArch64Operand::CreateVectorReg(
5711 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5712
5713 OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5714 if (Res == MatchOperand_ParseFail)
5715 return MatchOperand_ParseFail;
5716 return MatchOperand_Success;
5717 }
5718
5719 // Eat the comma
5720 getParser().Lex();
5721
5722 // Match the shift
5723 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5724 Res = tryParseOptionalShiftExtend(ExtOpnd);
5725 if (Res != MatchOperand_Success)
5726 return Res;
5727
5728 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5729 Operands.push_back(AArch64Operand::CreateVectorReg(
5730 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5731 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5732 Ext->hasShiftExtendAmount()));
5733
5734 return MatchOperand_Success;
5735}
5736
5737OperandMatchResultTy
5738AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5739 MCAsmParser &Parser = getParser();
5740
5741 SMLoc SS = getLoc();
5742 const AsmToken &TokE = Parser.getTok();
5743 bool IsHash = TokE.is(AsmToken::Hash);
5744
5745 if (!IsHash && TokE.isNot(AsmToken::Identifier))
5746 return MatchOperand_NoMatch;
5747
5748 int64_t Pattern;
5749 if (IsHash) {
5750 Parser.Lex(); // Eat hash
5751
5752 // Parse the immediate operand.
5753 const MCExpr *ImmVal;
5754 SS = getLoc();
5755 if (Parser.parseExpression(ImmVal))
5756 return MatchOperand_ParseFail;
5757
5758 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5759 if (!MCE)
5760 return MatchOperand_ParseFail;
5761
5762 Pattern = MCE->getValue();
5763 } else {
5764 // Parse the pattern
5765 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5766 if (!Pat)
5767 return MatchOperand_NoMatch;
5768
5769 Parser.Lex();
5770 Pattern = Pat->Encoding;
5771 assert(Pattern >= 0 && Pattern < 32)((Pattern >= 0 && Pattern < 32) ? static_cast<
void> (0) : __assert_fail ("Pattern >= 0 && Pattern < 32"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5771, __PRETTY_FUNCTION__))
;
5772 }
5773
5774 Operands.push_back(
5775 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5776 SS, getLoc(), getContext()));
5777
5778 return MatchOperand_Success;
5779}