Bug Summary

File: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 2353, column 7
1st function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn338205/include -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/.. 
-U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/lib/gcc/x86_64-linux-gnu/8/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Target/AArch64/AsmParser -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-29-043837-17923-1 -x c++ /build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp -faddrsig
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "Utils/AArch64BaseInfo.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/StringExtras.h"
21#include "llvm/ADT/StringMap.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/StringSwitch.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/MC/MCContext.h"
26#include "llvm/MC/MCExpr.h"
27#include "llvm/MC/MCInst.h"
28#include "llvm/MC/MCLinkerOptimizationHint.h"
29#include "llvm/MC/MCObjectFileInfo.h"
30#include "llvm/MC/MCParser/MCAsmLexer.h"
31#include "llvm/MC/MCParser/MCAsmParser.h"
32#include "llvm/MC/MCParser/MCAsmParserExtension.h"
33#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34#include "llvm/MC/MCParser/MCTargetAsmParser.h"
35#include "llvm/MC/MCRegisterInfo.h"
36#include "llvm/MC/MCStreamer.h"
37#include "llvm/MC/MCSubtargetInfo.h"
38#include "llvm/MC/MCSymbol.h"
39#include "llvm/MC/MCTargetOptions.h"
40#include "llvm/MC/SubtargetFeature.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/Compiler.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MathExtras.h"
45#include "llvm/Support/SMLoc.h"
46#include "llvm/Support/TargetParser.h"
47#include "llvm/Support/TargetRegistry.h"
48#include "llvm/Support/raw_ostream.h"
49#include <cassert>
50#include <cctype>
51#include <cstdint>
52#include <cstdio>
53#include <string>
54#include <tuple>
55#include <utility>
56#include <vector>
57
58using namespace llvm;
59
60namespace {
61
62enum class RegKind {
63 Scalar,
64 NeonVector,
65 SVEDataVector,
66 SVEPredicateVector
67};
68
69enum RegConstraintEqualityTy {
70 EqualsReg,
71 EqualsSuperReg,
72 EqualsSubReg
73};
74
75class AArch64AsmParser : public MCTargetAsmParser {
76private:
77 StringRef Mnemonic; ///< Instruction mnemonic.
78
79 // Map of register aliases registers via the .req directive.
80 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
81
82 AArch64TargetStreamer &getTargetStreamer() {
83 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
84 return static_cast<AArch64TargetStreamer &>(TS);
85 }
86
87 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
88
89 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
90 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
91 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
92 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
93 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
94 bool parseRegister(OperandVector &Operands);
95 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
96 bool parseNeonVectorList(OperandVector &Operands);
97 bool parseOptionalMulOperand(OperandVector &Operands);
98 bool parseOperand(OperandVector &Operands, bool isCondCode,
99 bool invertCondCode);
100
101 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
102 OperandVector &Operands);
103
104 bool parseDirectiveArch(SMLoc L);
105 bool parseDirectiveCPU(SMLoc L);
106 bool parseDirectiveInst(SMLoc L);
107
108 bool parseDirectiveTLSDescCall(SMLoc L);
109
110 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
111 bool parseDirectiveLtorg(SMLoc L);
112
113 bool parseDirectiveReq(StringRef Name, SMLoc L);
114 bool parseDirectiveUnreq(SMLoc L);
115
116 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
117 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
118 OperandVector &Operands, MCStreamer &Out,
119 uint64_t &ErrorInfo,
120 bool MatchingInlineAsm) override;
121/// @name Auto-generated Match Functions
122/// {
123
124#define GET_ASSEMBLER_HEADER
125#include "AArch64GenAsmMatcher.inc"
126
127 /// }
128
129 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
130 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
131 RegKind MatchKind);
132 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
133 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
134 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
135 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
136 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
137 template <bool IsSVEPrefetch = false>
138 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
139 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
140 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
141 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
142 template<bool AddFPZeroAsLiteral>
143 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
144 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
145 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
146 bool tryParseNeonVectorRegister(OperandVector &Operands);
147 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
148 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
149 template <bool ParseShiftExtend,
150 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
151 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
152 template <bool ParseShiftExtend, bool ParseSuffix>
153 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
154 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
155 template <RegKind VectorKind>
156 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
157 bool ExpectMatch = false);
158 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
159
160public:
161 enum AArch64MatchResultTy {
162 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
163#define GET_OPERAND_DIAGNOSTIC_TYPES
164#include "AArch64GenAsmMatcher.inc"
165 };
166 bool IsILP32;
167
168 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
169 const MCInstrInfo &MII, const MCTargetOptions &Options)
170 : MCTargetAsmParser(Options, STI, MII) {
171 IsILP32 = Options.getABIName() == "ilp32";
172 MCAsmParserExtension::Initialize(Parser);
173 MCStreamer &S = getParser().getStreamer();
174 if (S.getTargetStreamer() == nullptr)
175 new AArch64TargetStreamer(S);
176
177 // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
178 // directives as they have the same form and semantics:
179 /// ::= (.hword | .word | .xword ) [ expression (, expression)* ]
180 Parser.addAliasForDirective(".hword", ".2byte");
181 Parser.addAliasForDirective(".word", ".4byte");
182 Parser.addAliasForDirective(".xword", ".8byte");
183
184 // Initialize the set of available features.
185 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
186 }
187
188 bool regsEqual(const MCParsedAsmOperand &Op1,
189 const MCParsedAsmOperand &Op2) const override;
190 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
191 SMLoc NameLoc, OperandVector &Operands) override;
192 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
193 bool ParseDirective(AsmToken DirectiveID) override;
194 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
195 unsigned Kind) override;
196
197 static bool classifySymbolRef(const MCExpr *Expr,
198 AArch64MCExpr::VariantKind &ELFRefKind,
199 MCSymbolRefExpr::VariantKind &DarwinRefKind,
200 int64_t &Addend);
201};
202
203/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
204/// instruction.
205class AArch64Operand : public MCParsedAsmOperand {
206private:
207 enum KindTy {
208 k_Immediate,
209 k_ShiftedImm,
210 k_CondCode,
211 k_Register,
212 k_VectorList,
213 k_VectorIndex,
214 k_Token,
215 k_SysReg,
216 k_SysCR,
217 k_Prefetch,
218 k_ShiftExtend,
219 k_FPImm,
220 k_Barrier,
221 k_PSBHint,
222 } Kind;
223
224 SMLoc StartLoc, EndLoc;
225
226 struct TokOp {
227 const char *Data;
228 unsigned Length;
229 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
230 };
231
232 // Separate shift/extend operand.
233 struct ShiftExtendOp {
234 AArch64_AM::ShiftExtendType Type;
235 unsigned Amount;
236 bool HasExplicitAmount;
237 };
238
239 struct RegOp {
240 unsigned RegNum;
241 RegKind Kind;
242 int ElementWidth;
243
244 // The register may be allowed as a different register class,
245 // e.g. for GPR64as32 or GPR32as64.
246 RegConstraintEqualityTy EqualityTy;
247
248 // In some cases the shift/extend needs to be explicitly parsed together
249 // with the register, rather than as a separate operand. This is needed
250 // for addressing modes where the instruction as a whole dictates the
251 // scaling/extend, rather than specific bits in the instruction.
252 // By parsing them as a single operand, we avoid the need to pass an
253 // extra operand in all CodeGen patterns (because all operands need to
254 // have an associated value), and we avoid the need to update TableGen to
255 // accept operands that have no associated bits in the instruction.
256 //
257 // An added benefit of parsing them together is that the assembler
258 // can give a sensible diagnostic if the scaling is not correct.
259 //
260 // The default is 'lsl #0' (HasExplicitAmount = false) if no
261 // ShiftExtend is specified.
262 ShiftExtendOp ShiftExtend;
263 };
264
265 struct VectorListOp {
266 unsigned RegNum;
267 unsigned Count;
268 unsigned NumElements;
269 unsigned ElementWidth;
270 RegKind RegisterKind;
271 };
272
273 struct VectorIndexOp {
274 unsigned Val;
275 };
276
277 struct ImmOp {
278 const MCExpr *Val;
279 };
280
281 struct ShiftedImmOp {
282 const MCExpr *Val;
283 unsigned ShiftAmount;
284 };
285
286 struct CondCodeOp {
287 AArch64CC::CondCode Code;
288 };
289
290 struct FPImmOp {
291 uint64_t Val; // APFloat value bitcasted to uint64_t.
292 bool IsExact; // describes whether parsed value was exact.
293 };
294
295 struct BarrierOp {
296 const char *Data;
297 unsigned Length;
298 unsigned Val; // Not the enum since not all values have names.
299 };
300
301 struct SysRegOp {
302 const char *Data;
303 unsigned Length;
304 uint32_t MRSReg;
305 uint32_t MSRReg;
306 uint32_t PStateField;
307 };
308
309 struct SysCRImmOp {
310 unsigned Val;
311 };
312
313 struct PrefetchOp {
314 const char *Data;
315 unsigned Length;
316 unsigned Val;
317 };
318
319 struct PSBHintOp {
320 const char *Data;
321 unsigned Length;
322 unsigned Val;
323 };
324
325 struct ExtendOp {
326 unsigned Val;
327 };
328
329 union {
330 struct TokOp Tok;
331 struct RegOp Reg;
332 struct VectorListOp VectorList;
333 struct VectorIndexOp VectorIndex;
334 struct ImmOp Imm;
335 struct ShiftedImmOp ShiftedImm;
336 struct CondCodeOp CondCode;
337 struct FPImmOp FPImm;
338 struct BarrierOp Barrier;
339 struct SysRegOp SysReg;
340 struct SysCRImmOp SysCRImm;
341 struct PrefetchOp Prefetch;
342 struct PSBHintOp PSBHint;
343 struct ShiftExtendOp ShiftExtend;
344 };
345
346 // Keep the MCContext around as the MCExprs may need manipulated during
347 // the add<>Operands() calls.
348 MCContext &Ctx;
349
350public:
351 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
352
353 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
354 Kind = o.Kind;
355 StartLoc = o.StartLoc;
356 EndLoc = o.EndLoc;
357 switch (Kind) {
358 case k_Token:
359 Tok = o.Tok;
360 break;
361 case k_Immediate:
362 Imm = o.Imm;
363 break;
364 case k_ShiftedImm:
365 ShiftedImm = o.ShiftedImm;
366 break;
367 case k_CondCode:
368 CondCode = o.CondCode;
369 break;
370 case k_FPImm:
371 FPImm = o.FPImm;
372 break;
373 case k_Barrier:
374 Barrier = o.Barrier;
375 break;
376 case k_Register:
377 Reg = o.Reg;
378 break;
379 case k_VectorList:
380 VectorList = o.VectorList;
381 break;
382 case k_VectorIndex:
383 VectorIndex = o.VectorIndex;
384 break;
385 case k_SysReg:
386 SysReg = o.SysReg;
387 break;
388 case k_SysCR:
389 SysCRImm = o.SysCRImm;
390 break;
391 case k_Prefetch:
392 Prefetch = o.Prefetch;
393 break;
394 case k_PSBHint:
395 PSBHint = o.PSBHint;
396 break;
397 case k_ShiftExtend:
398 ShiftExtend = o.ShiftExtend;
399 break;
400 }
401 }
402
403 /// getStartLoc - Get the location of the first token of this operand.
404 SMLoc getStartLoc() const override { return StartLoc; }
405 /// getEndLoc - Get the location of the last token of this operand.
406 SMLoc getEndLoc() const override { return EndLoc; }
407
408 StringRef getToken() const {
409 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 409, __extension__ __PRETTY_FUNCTION__))
;
410 return StringRef(Tok.Data, Tok.Length);
411 }
412
413 bool isTokenSuffix() const {
414 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 414, __extension__ __PRETTY_FUNCTION__))
;
415 return Tok.IsSuffix;
416 }
417
418 const MCExpr *getImm() const {
419 assert(Kind == k_Immediate && "Invalid access!")(static_cast <bool> (Kind == k_Immediate && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 419, __extension__ __PRETTY_FUNCTION__))
;
420 return Imm.Val;
421 }
422
423 const MCExpr *getShiftedImmVal() const {
424 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 424, __extension__ __PRETTY_FUNCTION__))
;
425 return ShiftedImm.Val;
426 }
427
428 unsigned getShiftedImmShift() const {
429 assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 429, __extension__ __PRETTY_FUNCTION__))
;
430 return ShiftedImm.ShiftAmount;
431 }
432
433 AArch64CC::CondCode getCondCode() const {
434 assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 434, __extension__ __PRETTY_FUNCTION__))
;
435 return CondCode.Code;
436 }
437
438 APFloat getFPImm() const {
439 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 439, __extension__ __PRETTY_FUNCTION__))
;
440 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
441 }
442
443 bool getFPImmIsExact() const {
444 assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 444, __extension__ __PRETTY_FUNCTION__))
;
445 return FPImm.IsExact;
446 }
447
448 unsigned getBarrier() const {
449 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 449, __extension__ __PRETTY_FUNCTION__))
;
450 return Barrier.Val;
451 }
452
453 StringRef getBarrierName() const {
454 assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 454, __extension__ __PRETTY_FUNCTION__))
;
455 return StringRef(Barrier.Data, Barrier.Length);
456 }
457
458 unsigned getReg() const override {
459 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 459, __extension__ __PRETTY_FUNCTION__))
;
460 return Reg.RegNum;
461 }
462
463 RegConstraintEqualityTy getRegEqualityTy() const {
464 assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 464, __extension__ __PRETTY_FUNCTION__))
;
465 return Reg.EqualityTy;
466 }
467
468 unsigned getVectorListStart() const {
469 assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 469, __extension__ __PRETTY_FUNCTION__))
;
470 return VectorList.RegNum;
471 }
472
473 unsigned getVectorListCount() const {
474 assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 474, __extension__ __PRETTY_FUNCTION__))
;
475 return VectorList.Count;
476 }
477
478 unsigned getVectorIndex() const {
479 assert(Kind == k_VectorIndex && "Invalid access!")(static_cast <bool> (Kind == k_VectorIndex && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 479, __extension__ __PRETTY_FUNCTION__))
;
480 return VectorIndex.Val;
481 }
482
483 StringRef getSysReg() const {
484 assert(Kind == k_SysReg && "Invalid access!")(static_cast <bool> (Kind == k_SysReg && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 484, __extension__ __PRETTY_FUNCTION__))
;
485 return StringRef(SysReg.Data, SysReg.Length);
486 }
487
488 unsigned getSysCR() const {
489 assert(Kind == k_SysCR && "Invalid access!")(static_cast <bool> (Kind == k_SysCR && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 489, __extension__ __PRETTY_FUNCTION__))
;
490 return SysCRImm.Val;
491 }
492
493 unsigned getPrefetch() const {
494 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 494, __extension__ __PRETTY_FUNCTION__))
;
495 return Prefetch.Val;
496 }
497
498 unsigned getPSBHint() const {
499 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 499, __extension__ __PRETTY_FUNCTION__))
;
500 return PSBHint.Val;
501 }
502
503 StringRef getPSBHintName() const {
504 assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 504, __extension__ __PRETTY_FUNCTION__))
;
505 return StringRef(PSBHint.Data, PSBHint.Length);
506 }
507
508 StringRef getPrefetchName() const {
509 assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 509, __extension__ __PRETTY_FUNCTION__))
;
510 return StringRef(Prefetch.Data, Prefetch.Length);
511 }
512
513 AArch64_AM::ShiftExtendType getShiftExtendType() const {
514 if (Kind == k_ShiftExtend)
515 return ShiftExtend.Type;
516 if (Kind == k_Register)
517 return Reg.ShiftExtend.Type;
518 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 518)
;
519 }
520
521 unsigned getShiftExtendAmount() const {
522 if (Kind == k_ShiftExtend)
523 return ShiftExtend.Amount;
524 if (Kind == k_Register)
525 return Reg.ShiftExtend.Amount;
526 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 526)
;
527 }
528
529 bool hasShiftExtendAmount() const {
530 if (Kind == k_ShiftExtend)
531 return ShiftExtend.HasExplicitAmount;
532 if (Kind == k_Register)
533 return Reg.ShiftExtend.HasExplicitAmount;
534 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 534)
;
535 }
536
537 bool isImm() const override { return Kind == k_Immediate; }
538 bool isMem() const override { return false; }
539
540 bool isUImm6() const {
541 if (!isImm())
542 return false;
543 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 if (!MCE)
545 return false;
546 int64_t Val = MCE->getValue();
547 return (Val >= 0 && Val < 64);
548 }
549
550 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
551
552 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
553 return isImmScaled<Bits, Scale>(true);
554 }
555
556 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
557 return isImmScaled<Bits, Scale>(false);
558 }
559
560 template <int Bits, int Scale>
561 DiagnosticPredicate isImmScaled(bool Signed) const {
562 if (!isImm())
563 return DiagnosticPredicateTy::NoMatch;
564
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
566 if (!MCE)
567 return DiagnosticPredicateTy::NoMatch;
568
569 int64_t MinVal, MaxVal;
570 if (Signed) {
571 int64_t Shift = Bits - 1;
572 MinVal = (int64_t(1) << Shift) * -Scale;
573 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
574 } else {
575 MinVal = 0;
576 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
577 }
578
579 int64_t Val = MCE->getValue();
580 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
581 return DiagnosticPredicateTy::Match;
582
583 return DiagnosticPredicateTy::NearMatch;
584 }
585
586 DiagnosticPredicate isSVEPattern() const {
587 if (!isImm())
588 return DiagnosticPredicateTy::NoMatch;
589 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
590 if (!MCE)
591 return DiagnosticPredicateTy::NoMatch;
592 int64_t Val = MCE->getValue();
593 if (Val >= 0 && Val < 32)
594 return DiagnosticPredicateTy::Match;
595 return DiagnosticPredicateTy::NearMatch;
596 }
597
598 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
599 AArch64MCExpr::VariantKind ELFRefKind;
600 MCSymbolRefExpr::VariantKind DarwinRefKind;
601 int64_t Addend;
602 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
603 Addend)) {
604 // If we don't understand the expression, assume the best and
605 // let the fixup and relocation code deal with it.
606 return true;
607 }
608
609 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
610 ELFRefKind == AArch64MCExpr::VK_LO12 ||
611 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
612 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
613 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
614 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
615 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
616 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
617 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
618 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
619 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
620 // Note that we don't range-check the addend. It's adjusted modulo page
621 // size when converted, so there is no "out of range" condition when using
622 // @pageoff.
623 return Addend >= 0 && (Addend % Scale) == 0;
624 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
625 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
626 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
627 return Addend == 0;
628 }
629
630 return false;
631 }
632
633 template <int Scale> bool isUImm12Offset() const {
634 if (!isImm())
635 return false;
636
637 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
638 if (!MCE)
639 return isSymbolicUImm12Offset(getImm(), Scale);
640
641 int64_t Val = MCE->getValue();
642 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
643 }
644
645 template <int N, int M>
646 bool isImmInRange() const {
647 if (!isImm())
648 return false;
649 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
650 if (!MCE)
651 return false;
652 int64_t Val = MCE->getValue();
653 return (Val >= N && Val <= M);
654 }
655
656 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
657 // a logical immediate can always be represented when inverted.
658 template <typename T>
659 bool isLogicalImm() const {
660 if (!isImm())
661 return false;
662 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
663 if (!MCE)
664 return false;
665
666 int64_t Val = MCE->getValue();
667 int64_t SVal = typename std::make_signed<T>::type(Val);
668 int64_t UVal = typename std::make_unsigned<T>::type(Val);
669 if (Val != SVal && Val != UVal)
670 return false;
671
672 return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
673 }
674
675 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
676
677 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
678 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
679 /// immediate that can be shifted by 'Shift'.
680 template <unsigned Width>
681 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
682 if (isShiftedImm() && Width == getShiftedImmShift())
683 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
684 return std::make_pair(CE->getValue(), Width);
685
686 if (isImm())
687 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
688 int64_t Val = CE->getValue();
689 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
690 return std::make_pair(Val >> Width, Width);
691 else
692 return std::make_pair(Val, 0u);
693 }
694
695 return {};
696 }
697
/// True if this operand is usable as the immediate of an ADD/SUB
/// instruction: either an in-range constant (optionally 'lsl #12'), or a
/// symbolic reference with a page-offset/low-12-bits style relocation.
bool isAddSubImm() const {
  if (!isShiftedImm() && !isImm())
    return false;

  const MCExpr *Expr;

  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
  if (isShiftedImm()) {
    unsigned Shift = ShiftedImm.ShiftAmount;
    Expr = ShiftedImm.Val;
    if (Shift != 0 && Shift != 12)
      return false;
  } else {
    Expr = getImm();
  }

  // Symbolic operand: accept only relocation kinds that resolve to a
  // 12-bit (page-offset / low-12 / HI12) field the fixup code can encode.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
    return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
        || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
        || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
        || ELFRefKind == AArch64MCExpr::VK_LO12
        || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
        || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
        || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
        || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
        || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
        || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
        || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
        || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
        || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
  }

  // If it's a constant, it should be a real immediate in range.
  if (auto ShiftedVal = getShiftedVal<12>())
    return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

  // If it's an expression, we hope for the best and let the fixup/relocation
  // code deal with it.
  return true;
}
742
/// True if this operand is a negative constant in range for an ADD/SUB
/// alias that is encoded by negating the immediate (e.g. 'add #-1' -> sub).
bool isAddSubImmNeg() const {
  if (!isShiftedImm() && !isImm())
    return false;

  // Otherwise it should be a real negative immediate in range.
  if (auto ShiftedVal = getShiftedVal<12>())
    return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

  // Symbolic expressions are never acceptable here (unlike isAddSubImm).
  return false;
}
753
// Signed value in the range -128 to +127. For element widths of
// 16 bits or higher it may also be a signed multiple of 256 in the
// range -32768 to +32512.
// For element-width of 8 bits a range of -128 to 255 is accepted,
// since a copy of a byte can be either signed/unsigned.
template <typename T>
DiagnosticPredicate isSVECpyImm() const {
  // Must be a (possibly shifted) constant immediate to be considered at all.
  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
    return DiagnosticPredicateTy::NoMatch;

  bool IsByte =
      std::is_same<int8_t, typename std::make_signed<T>::type>::value;
  // A byte element cannot take a 'lsl #8' form; for wider elements fold the
  // shift into the value (unsigned cast avoids UB on negative left-shift)
  // and let the target helper validate the range.
  if (auto ShiftedImm = getShiftedVal<8>())
    if (!(IsByte && ShiftedImm->second) &&
        AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                   << ShiftedImm->second))
      return DiagnosticPredicateTy::Match;

  // Constant immediate, but out of range: report a near-match so the
  // matcher can emit a targeted diagnostic.
  return DiagnosticPredicateTy::NearMatch;
}
774
775 // Unsigned value in the range 0 to 255. For element widths of
776 // 16 bits or higher it may also be a signed multiple of 256 in the
777 // range 0 to 65280.
778 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
779 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
780 return DiagnosticPredicateTy::NoMatch;
781
782 bool IsByte =
783 std::is_same<int8_t, typename std::make_signed<T>::type>::value;
784 if (auto ShiftedImm = getShiftedVal<8>())
785 if (!(IsByte && ShiftedImm->second) &&
786 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
787 << ShiftedImm->second))
788 return DiagnosticPredicateTy::Match;
789
790 return DiagnosticPredicateTy::NearMatch;
791 }
792
/// True when the value is a valid logical immediate for element type T but
/// is NOT representable as an SVE CPY immediate — i.e. the logical-imm
/// encoding is the preferred (only) choice.
template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
  if (isLogicalImm<T>() && !isSVECpyImm<T>())
    return DiagnosticPredicateTy::Match;
  return DiagnosticPredicateTy::NoMatch;
}
798
/// True if this operand is a parsed condition code (eq, ne, ...).
bool isCondCode() const { return Kind == k_CondCode; }

/// True if the operand is a constant encodable as an AdvSIMD modified
/// immediate of type 10 (the 64-bit byte-mask form used by e.g. MOVI d).
bool isSIMDImmType10() const {
  if (!isImm())
    return false;
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  if (!MCE)
    return false;
  return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
}
809
/// True if the operand can be a branch target with an N-bit scaled offset.
/// Non-constant expressions are accepted optimistically and left to the
/// fixup code; constants must be 4-byte aligned and fit the signed range.
template<int N>
bool isBranchTarget() const {
  if (!isImm())
    return false;
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  if (!MCE)
    return true; // Symbolic target: resolved later by the fixup/relocation.
  int64_t Val = MCE->getValue();
  if (Val & 0x3)
    return false; // Branch offsets are encoded in units of 4 bytes.
  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
  // Signed N-bit immediate, scaled by 4: [-2^(N-1)*4, (2^(N-1)-1)*4].
  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
}
823
/// True if the operand is a symbolic reference whose ELF relocation modifier
/// is one of \p AllowedModifiers (with zero addend) and which carries no
/// Darwin modifier — the form accepted by the MOVZ/MOVK symbol aliases.
bool
isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
  if (!isImm())
    return false;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                           DarwinRefKind, Addend)) {
    return false;
  }
  // MOVW symbol modifiers are an ELF-only concept here.
  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
    return false;

  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
    if (ELFRefKind == AllowedModifiers[i])
      return Addend == 0; // Addends are not encodable in a MOVW field.
  }

  return false;
}
846
// Thin wrappers over isMovWSymbol: one predicate per MOVZ/MOVK half-word
// slice (G0..G3), each listing the relocation modifiers legal for that
// slice. The _S variants are the signed forms; _NC means "no overflow
// check", which is what MOVK (keep) requires.

bool isMovZSymbolG3() const {
  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
}

bool isMovZSymbolG2() const {
  return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
                       AArch64MCExpr::VK_TPREL_G2,
                       AArch64MCExpr::VK_DTPREL_G2});
}

bool isMovZSymbolG1() const {
  return isMovWSymbol({
      AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
      AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
      AArch64MCExpr::VK_DTPREL_G1,
  });
}

bool isMovZSymbolG0() const {
  return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
                       AArch64MCExpr::VK_TPREL_G0,
                       AArch64MCExpr::VK_DTPREL_G0});
}

bool isMovKSymbolG3() const {
  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
}

bool isMovKSymbolG2() const {
  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
}

bool isMovKSymbolG1() const {
  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
                       AArch64MCExpr::VK_TPREL_G1_NC,
                       AArch64MCExpr::VK_DTPREL_G1_NC});
}

bool isMovKSymbolG0() const {
  return isMovWSymbol(
      {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
       AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
}
890
891 template<int RegWidth, int Shift>
892 bool isMOVZMovAlias() const {
893 if (!isImm()) return false;
894
895 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
896 if (!CE) return false;
897 uint64_t Value = CE->getValue();
898
899 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
900 }
901
902 template<int RegWidth, int Shift>
903 bool isMOVNMovAlias() const {
904 if (!isImm()) return false;
905
906 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
907 if (!CE) return false;
908 uint64_t Value = CE->getValue();
909
910 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
911 }
912
/// True if the operand is a floating-point immediate encodable in the 8-bit
/// FMOV immediate field (getFP64Imm returns -1 for non-encodable values).
bool isFPImm() const {
  return Kind == k_FPImm &&
         AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
}

bool isBarrier() const { return Kind == k_Barrier; }
bool isSysReg() const { return Kind == k_SysReg; }
920
/// True if the system register is readable via MRS (-1U marks "no MRS
/// encoding" for this register).
bool isMRSSystemRegister() const {
  if (!isSysReg()) return false;

  return SysReg.MRSReg != -1U;
}

/// True if the system register is writable via MSR.
bool isMSRSystemRegister() const {
  if (!isSysReg()) return false;
  return SysReg.MSRReg != -1U;
}

/// True for the PSTATE fields whose MSR-immediate form takes only #0/#1
/// (PAN, DIT, UAO).
bool isSystemPStateFieldWithImm0_1() const {
  if (!isSysReg()) return false;
  return (SysReg.PStateField == AArch64PState::PAN ||
          SysReg.PStateField == AArch64PState::DIT ||
          SysReg.PStateField == AArch64PState::UAO);
}

/// True for any other valid PSTATE field, which takes a #0..#15 immediate.
/// Deliberately excludes the 0/1 fields so the two predicates are disjoint.
bool isSystemPStateFieldWithImm0_15() const {
  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
  return SysReg.PStateField != -1U;
}
943
/// Generic MCParsedAsmOperand register predicate (any register kind).
bool isReg() const override {
  return Kind == k_Register;
}

/// A plain scalar (X/W/FP) register, as opposed to a vector register.
bool isScalarReg() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
}

bool isNeonVectorReg() const {
  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
}

/// NEON vector register restricted to the lower half (V0-V15), as required
/// by the by-element forms of some instructions.
bool isNeonVectorRegLo() const {
  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
         AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
             Reg.RegNum);
}
961
/// True if the operand is an SVE register of the given register class.
/// The class template argument determines whether a data vector (Z) or
/// predicate (P) register kind is expected.
template <unsigned Class> bool isSVEVectorReg() const {
  RegKind RK;
  switch (Class) {
  case AArch64::ZPRRegClassID:
  case AArch64::ZPR_3bRegClassID:
  case AArch64::ZPR_4bRegClassID:
    RK = RegKind::SVEDataVector;
    break;
  case AArch64::PPRRegClassID:
  case AArch64::PPR_3bRegClassID:
    RK = RegKind::SVEPredicateVector;
    break;
  default:
    // Class is a template parameter, so an unsupported value is a
    // programming error, not a parse failure.
    llvm_unreachable("Unsupport register class");
  }

  return (Kind == k_Register && Reg.Kind == RK) &&
         AArch64MCRegisterClasses[Class].contains(getReg());
}
981
/// True if the operand is a scalar FP register (B/H/S/D/Q) being used where
/// an SVE Z register is expected (the "FPR as ZPR" aliasing forms).
template <unsigned Class> bool isFPRasZPR() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[Class].contains(getReg());
}
986
/// Match an SVE predicate register of the given class and element width.
/// ElementWidth == 0 matches an unsuffixed register; a correct register
/// with the wrong suffix yields NearMatch for a better diagnostic.
template <int ElementWidth, unsigned Class>
DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
  if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
    return DiagnosticPredicateTy::NoMatch;

  if (isSVEVectorReg<Class>() &&
      (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
    return DiagnosticPredicateTy::Match;

  return DiagnosticPredicateTy::NearMatch;
}

/// Match an SVE data (Z) register of the given class and element width;
/// same NoMatch/NearMatch semantics as the predicate-register variant.
template <int ElementWidth, unsigned Class>
DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
  if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
    return DiagnosticPredicateTy::NoMatch;

  if (isSVEVectorReg<Class>() &&
      (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
    return DiagnosticPredicateTy::Match;

  return DiagnosticPredicateTy::NearMatch;
}
1010
/// Match an SVE data register combined with a shift/extend modifier of the
/// expected type and amount (used by scaled addressing modes).
template <int ElementWidth, unsigned Class,
          AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
          bool ShiftWidthAlwaysSame>
DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
  auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
  if (!VectorMatch.isMatch())
    return DiagnosticPredicateTy::NoMatch;

  // Give a more specific diagnostic when the user has explicitly typed in
  // a shift-amount that does not match what is expected, but for which
  // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
  bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
  if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                      ShiftExtendTy == AArch64_AM::SXTW) &&
      !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
    return DiagnosticPredicateTy::NoMatch;

  if (MatchShift && ShiftExtendTy == getShiftExtendType())
    return DiagnosticPredicateTy::Match;

  return DiagnosticPredicateTy::NearMatch;
}
1033
/// A 64-bit GPR written where a 32-bit register is expected (the parser
/// accepts it and addGPR32as64Operands maps it to the W sub-register).
bool isGPR32as64() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
}

/// A 32-bit GPR written where a 64-bit register is expected.
bool isGPR64as32() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
}

/// First register of a 32-bit sequential register pair (e.g. CASP).
bool isWSeqPair() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
             Reg.RegNum);
}

/// First register of a 64-bit sequential register pair.
bool isXSeqPair() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
             Reg.RegNum);
}
1055
1056 template<int64_t Angle, int64_t Remainder>
1057 DiagnosticPredicate isComplexRotation() const {
1058 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1059
1060 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1061 if (!CE) return DiagnosticPredicateTy::NoMatch;
1062 uint64_t Value = CE->getValue();
1063
1064 if (Value % Angle == Remainder && Value <= 270)
1065 return DiagnosticPredicateTy::Match;
1066 return DiagnosticPredicateTy::NearMatch;
1067 }
1068
/// True if the operand is a scalar register in the given 64-bit GPR class.
template <unsigned RegClassID> bool isGPR64() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[RegClassID].contains(getReg());
}

/// Match a 64-bit GPR carrying an 'lsl #log2(ExtWidth/8)' modifier, the
/// form used by scaled register offsets. Right register with the wrong
/// shift yields NearMatch for a targeted diagnostic.
template <unsigned RegClassID, int ExtWidth>
DiagnosticPredicate isGPR64WithShiftExtend() const {
  if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
    return DiagnosticPredicateTy::NoMatch;

  if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
      getShiftExtendAmount() == Log2_32(ExtWidth / 8))
    return DiagnosticPredicateTy::Match;
  return DiagnosticPredicateTy::NearMatch;
}
1084
/// Is this a vector list with the type implicit (presumably attached to the
/// instruction itself)?
template <RegKind VectorKind, unsigned NumRegs>
bool isImplicitlyTypedVectorList() const {
  return Kind == k_VectorList && VectorList.Count == NumRegs &&
         VectorList.NumElements == 0 &&
         VectorList.RegisterKind == VectorKind;
}

/// Match a vector list whose register count, register kind, element count,
/// and element width all equal the template parameters.
template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
          unsigned ElementWidth>
bool isTypedVectorList() const {
  if (Kind != k_VectorList)
    return false;
  if (VectorList.Count != NumRegs)
    return false;
  if (VectorList.RegisterKind != VectorKind)
    return false;
  if (VectorList.ElementWidth != ElementWidth)
    return false;
  return VectorList.NumElements == NumElements;
}

/// Match a vector element index in [Min, Max]; an index outside the range
/// yields NearMatch so the matcher can report "index out of range".
template <int Min, int Max>
DiagnosticPredicate isVectorIndex() const {
  if (Kind != k_VectorIndex)
    return DiagnosticPredicateTy::NoMatch;
  if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
    return DiagnosticPredicateTy::Match;
  return DiagnosticPredicateTy::NearMatch;
}
1116
bool isToken() const override { return Kind == k_Token; }

/// True if the operand is the exact token \p Str.
bool isTokenEqual(StringRef Str) const {
  return Kind == k_Token && getToken() == Str;
}
bool isSysCR() const { return Kind == k_SysCR; }
bool isPrefetch() const { return Kind == k_Prefetch; }
bool isPSBHint() const { return Kind == k_PSBHint; }
bool isShiftExtend() const { return Kind == k_ShiftExtend; }

/// True for any shift-type modifier (LSL/LSR/ASR/ROR/MSL), as opposed to
/// the register-extend types (UXTB, SXTW, ...) handled by isExtend().
bool isShifter() const {
  if (!isShiftExtend())
    return false;

  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
          ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
          ST == AArch64_AM::MSL);
}
1135
/// Match an FP immediate that is bit-for-bit equal to the table entry for
/// \p ImmEnum. Requires the parsed literal to have been exact (no rounding
/// during parsing), since the comparison is bitwise.
template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
  if (Kind != k_FPImm)
    return DiagnosticPredicateTy::NoMatch;

  if (getFPImmIsExact()) {
    // Lookup the immediate from table of supported immediates.
    auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
    assert(Desc && "Unknown enum value");

    // Calculate its FP value.
    APFloat RealVal(APFloat::IEEEdouble());
    if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
        APFloat::opOK)
      // Table entries are exact decimal representations by construction.
      llvm_unreachable("FP immediate is not exact");

    if (getFPImm().bitwiseIsEqual(RealVal))
      return DiagnosticPredicateTy::Match;
  }

  return DiagnosticPredicateTy::NearMatch;
}

/// Match either of two exact FP immediates; returns the stronger result.
/// If neither matches, the result of the second attempt (NearMatch or
/// NoMatch) is propagated for diagnostics.
template <unsigned ImmA, unsigned ImmB>
DiagnosticPredicate isExactFPImm() const {
  DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
  if ((Res = isExactFPImm<ImmA>()))
    return DiagnosticPredicateTy::Match;
  if ((Res = isExactFPImm<ImmB>()))
    return DiagnosticPredicateTy::Match;
  return Res;
}
1167
/// True for any register-extend modifier (UXTB..SXTX, or LSL which aliases
/// UXTW/UXTX in extend position) with an amount of at most 4.
bool isExtend() const {
  if (!isShiftExtend())
    return false;

  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
          ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
          ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
          ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
          ET == AArch64_AM::LSL) &&
         getShiftExtendAmount() <= 4;
}

bool isExtend64() const {
  if (!isExtend())
    return false;
  // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
}

/// The extend forms legal on a 64-bit source register: UXTX/SXTX/LSL.
bool isExtendLSL64() const {
  if (!isExtend())
    return false;
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
          ET == AArch64_AM::LSL) &&
         getShiftExtendAmount() <= 4;
}
1197
/// Extend modifier legal for an X-register memory offset of the given
/// access width: LSL or SXTX, with amount 0 or log2(Width/8).
template<int Width> bool isMemXExtend() const {
  if (!isExtend())
    return false;
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
         (getShiftExtendAmount() == Log2_32(Width / 8) ||
          getShiftExtendAmount() == 0);
}

/// Extend modifier legal for a W-register memory offset of the given
/// access width: UXTW or SXTW, with amount 0 or log2(Width/8).
template<int Width> bool isMemWExtend() const {
  if (!isExtend())
    return false;
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
         (getShiftExtendAmount() == Log2_32(Width / 8) ||
          getShiftExtendAmount() == 0);
}
1215
/// Shifter legal in arithmetic (ADD/SUB) operands for a register of the
/// given bit width.
template <unsigned width>
bool isArithmeticShifter() const {
  if (!isShifter())
    return false;

  // An arithmetic shifter is LSL, LSR, or ASR.
  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
          ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
}

/// Shifter legal in logical (AND/ORR/EOR) operands — adds ROR to the
/// arithmetic set.
template <unsigned width>
bool isLogicalShifter() const {
  if (!isShifter())
    return false;

  // A logical shifter is LSL, LSR, ASR or ROR.
  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
          ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
         getShiftExtendAmount() < width;
}
1238
bool isMovImm32Shifter() const {
  if (!isShifter())
    return false;

  // A 32-bit MOVi shifter is LSL of 0 or 16.
  // (NOTE(review): the original comment listed 0/16/32/48 — that set belongs
  // to the 64-bit variant below; the code here only accepts 0 and 16.)
  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  if (ST != AArch64_AM::LSL)
    return false;
  uint64_t Val = getShiftExtendAmount();
  return (Val == 0 || Val == 16);
}
1250
bool isMovImm64Shifter() const {
  if (!isShifter())
    return false;

  // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
  // (NOTE(review): the original comment said "0 or 16" — swapped with the
  // 32-bit variant above; the code accepts all four half-word positions.)
  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  if (ST != AArch64_AM::LSL)
    return false;
  uint64_t Val = getShiftExtendAmount();
  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
}
1262
bool isLogicalVecShifter() const {
  if (!isShifter())
    return false;

  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
  unsigned Shift = getShiftExtendAmount();
  return getShiftExtendType() == AArch64_AM::LSL &&
         (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
}
1272
bool isLogicalVecHalfWordShifter() const {
  if (!isLogicalVecShifter())
    return false;

  // A logical vector half-word shifter is a left shift by 0 or 8.
  unsigned Shift = getShiftExtendAmount();
  return getShiftExtendType() == AArch64_AM::LSL &&
         (Shift == 0 || Shift == 8);
}
1282
bool isMoveVecShifter() const {
  if (!isShiftExtend())
    return false;

  // A move vector shifter is an MSL (shift-ones) left shift by 8 or 16.
  // (NOTE(review): original comment said "logical vector shifter", but the
  // code checks MSL, not LSL.)
  unsigned Shift = getShiftExtendAmount();
  return getShiftExtendType() == AArch64_AM::MSL &&
         (Shift == 8 || Shift == 16);
}
1292
// Fallback unscaled operands are for aliases of LDR/STR that fall back
// to LDUR/STUR when the offset is not legal for the former but is for
// the latter. As such, in addition to checking for being a legal unscaled
// address, also check that it is not a legal scaled address. This avoids
// ambiguity in the matcher.
template<int Width>
bool isSImm9OffsetFB() const {
  return isSImm<9>() && !isUImm12Offset<Width / 8>();
}
1302
/// True if the operand is usable as an ADRP label: a page-aligned constant
/// within +/-4GB, or any symbolic expression (validated during parsing).
bool isAdrpLabel() const {
  // Validation was handled during parsing, so we just sanity check that
  // something didn't go haywire.
  if (!isImm())
    return false;

  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
    int64_t Val = CE->getValue();
    // ADRP encodes a signed 21-bit page count: +/- 2^20 pages of 4KiB.
    int64_t Min = - (4096 * (1LL << (21 - 1)));
    int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
    return (Val % 4096) == 0 && Val >= Min && Val <= Max;
  }

  return true;
}
1318
/// True if the operand is usable as an ADR label: a constant within the
/// signed 21-bit byte range, or any symbolic expression.
bool isAdrLabel() const {
  // Validation was handled during parsing, so we just sanity check that
  // something didn't go haywire.
  if (!isImm())
    return false;

  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
    int64_t Val = CE->getValue();
    // ADR encodes a signed 21-bit byte offset (no page scaling).
    int64_t Min = - (1LL << (21 - 1));
    int64_t Max = ((1LL << (21 - 1)) - 1);
    return Val >= Min && Val <= Max;
  }

  return true;
}
1334
1335 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1336 // Add as immediates when possible. Null MCExpr = 0.
1337 if (!Expr)
1338 Inst.addOperand(MCOperand::createImm(0));
1339 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1340 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1341 else
1342 Inst.addOperand(MCOperand::createExpr(Expr));
1343 }
1344
/// Emit this register operand into \p Inst unchanged.
void addRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(getReg()));
}
1349
/// Emit the 32-bit (W) counterpart of the parsed 64-bit (X) register by
/// mapping through the shared hardware encoding value.
void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
      RI->getEncodingValue(getReg()));

  Inst.addOperand(MCOperand::createReg(Reg));
}
1361
/// Emit the 64-bit (X) counterpart of the parsed 32-bit (W) register by
/// mapping through the shared hardware encoding value.
void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
      RI->getEncodingValue(getReg()));

  Inst.addOperand(MCOperand::createReg(Reg));
}
1373
/// Emit the SVE Z register aliasing the parsed scalar FP register of the
/// given width, relying on B0/H0/S0/D0/Q0/Z0 register numbers being laid
/// out as parallel contiguous ranges.
template <int Width>
void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
  unsigned Base;
  switch (Width) {
  case 8:   Base = AArch64::B0; break;
  case 16:  Base = AArch64::H0; break;
  case 32:  Base = AArch64::S0; break;
  case 64:  Base = AArch64::D0; break;
  case 128: Base = AArch64::Q0; break;
  default:
    // Width is a template parameter: any other value is a programming error.
    llvm_unreachable("Unsupported width");
  }
  Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
}
1388
/// Emit the D (64-bit) view of the parsed Q (128-bit) vector register,
/// relying on the contiguous D0../Q0.. register-number layout.
void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(
      AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
}
1395
/// Emit the parsed 128-bit vector register unchanged.
void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(
      AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
  Inst.addOperand(MCOperand::createReg(getReg()));
}
1402
/// Emit a lower-half (V0-V15) vector register unchanged; the class
/// restriction was already checked by isNeonVectorRegLo().
void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(getReg()));
}
1407
/// Row selector into the FirstRegs table in addVectorListOperands: which
/// register bank a vector list is emitted in.
enum VecListIndexType {
  VecListIdx_DReg = 0,
  VecListIdx_QReg = 1,
  VecListIdx_ZReg = 2,
};
1413
/// Emit a vector list as a single tuple register. The list register is
/// computed by offsetting the parsed start register from the bank's base
/// (table column 0) into the NumRegs-tuple register range.
template <VecListIndexType RegTy, unsigned NumRegs>
void addVectorListOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // [bank][count]: column 0 holds the bank's parse-time base register;
  // columns 1..4 hold the first 1/2/3/4-tuple register of that bank.
  static const unsigned FirstRegs[][5] = {
    /* DReg */ { AArch64::Q0,
                 AArch64::D0,       AArch64::D0_D1,
                 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
    /* QReg */ { AArch64::Q0,
                 AArch64::Q0,       AArch64::Q0_Q1,
                 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
    /* ZReg */ { AArch64::Z0,
                 AArch64::Z0,       AArch64::Z0_Z1,
                 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
  };

  assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
         " NumRegs must be <= 4 for ZRegs");

  unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
  Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                       FirstRegs[(unsigned)RegTy][0]));
}
1436
/// Emit the vector element index as an immediate operand.
void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
1441
/// Emit a one-bit immediate selecting between the two exact FP immediates
/// this operand class accepts: 1 when the value matches ImmIs1, 0 when it
/// matches ImmIs0.
template <unsigned ImmIs0, unsigned ImmIs1>
void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
  Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
}
1448
1449 void addImmOperands(MCInst &Inst, unsigned N) const {
1450 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1450, __extension__ __PRETTY_FUNCTION__))
;
1451 // If this is a pageoff symrefexpr with an addend, adjust the addend
1452 // to be only the page-offset portion. Otherwise, just add the expr
1453 // as-is.
1454 addExpr(Inst, getImm());
1455 }
1456
1457 template <int Shift>
1458 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1459 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1459, __extension__ __PRETTY_FUNCTION__))
;
1460 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1461 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1462 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1463 } else if (isShiftedImm()) {
1464 addExpr(Inst, getShiftedImmVal());
1465 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1466 } else {
1467 addExpr(Inst, getImm());
1468 Inst.addOperand(MCOperand::createImm(0));
1469 }
1470 }
1471
1472 template <int Shift>
1473 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1474 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1474, __extension__ __PRETTY_FUNCTION__))
;
1475 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1476 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1477 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1478 } else
1479 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1479)
;
1480 }
1481
1482 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1483 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1483, __extension__ __PRETTY_FUNCTION__))
;
1484 Inst.addOperand(MCOperand::createImm(getCondCode()));
1485 }
1486
1487 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1488 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1488, __extension__ __PRETTY_FUNCTION__))
;
1489 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1490 if (!MCE)
1491 addExpr(Inst, getImm());
1492 else
1493 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1494 }
1495
1496 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1497 addImmOperands(Inst, N);
1498 }
1499
1500 template<int Scale>
1501 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1502 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1502, __extension__ __PRETTY_FUNCTION__))
;
1503 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1504
1505 if (!MCE) {
1506 Inst.addOperand(MCOperand::createExpr(getImm()));
1507 return;
1508 }
1509 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1510 }
1511
1512 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1513 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1513, __extension__ __PRETTY_FUNCTION__))
;
1514 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1515 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1516 }
1517
1518 template <int Scale>
1519 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1520 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1520, __extension__ __PRETTY_FUNCTION__))
;
1521 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1522 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1523 }
1524
1525 template <typename T>
1526 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1527 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1527, __extension__ __PRETTY_FUNCTION__))
;
1528 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1529 typename std::make_unsigned<T>::type Val = MCE->getValue();
1530 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1531 Inst.addOperand(MCOperand::createImm(encoding));
1532 }
1533
1534 template <typename T>
1535 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1536 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1536, __extension__ __PRETTY_FUNCTION__))
;
1537 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1538 typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1539 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1540 Inst.addOperand(MCOperand::createImm(encoding));
1541 }
1542
1543 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1544 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1544, __extension__ __PRETTY_FUNCTION__))
;
1545 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1546 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1547 Inst.addOperand(MCOperand::createImm(encoding));
1548 }
1549
1550 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1551 // Branch operands don't encode the low bits, so shift them off
1552 // here. If it's a label, however, just put it on directly as there's
1553 // not enough information now to do anything.
1554 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1554, __extension__ __PRETTY_FUNCTION__))
;
1555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1556 if (!MCE) {
1557 addExpr(Inst, getImm());
1558 return;
1559 }
1560 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1560, __extension__ __PRETTY_FUNCTION__))
;
1561 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1562 }
1563
1564 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1565 // Branch operands don't encode the low bits, so shift them off
1566 // here. If it's a label, however, just put it on directly as there's
1567 // not enough information now to do anything.
1568 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1568, __extension__ __PRETTY_FUNCTION__))
;
1569 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1570 if (!MCE) {
1571 addExpr(Inst, getImm());
1572 return;
1573 }
1574 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1574, __extension__ __PRETTY_FUNCTION__))
;
1575 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1576 }
1577
1578 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1579 // Branch operands don't encode the low bits, so shift them off
1580 // here. If it's a label, however, just put it on directly as there's
1581 // not enough information now to do anything.
1582 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1582, __extension__ __PRETTY_FUNCTION__))
;
1583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1584 if (!MCE) {
1585 addExpr(Inst, getImm());
1586 return;
1587 }
1588 assert(MCE && "Invalid constant immediate operand!")(static_cast <bool> (MCE && "Invalid constant immediate operand!"
) ? void (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1588, __extension__ __PRETTY_FUNCTION__))
;
1589 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1590 }
1591
1592 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1593 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1593, __extension__ __PRETTY_FUNCTION__))
;
1594 Inst.addOperand(MCOperand::createImm(
1595 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1596 }
1597
1598 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1599 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1599, __extension__ __PRETTY_FUNCTION__))
;
1600 Inst.addOperand(MCOperand::createImm(getBarrier()));
1601 }
1602
1603 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1604 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1604, __extension__ __PRETTY_FUNCTION__))
;
1605
1606 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1607 }
1608
1609 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1610 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1610, __extension__ __PRETTY_FUNCTION__))
;
1611
1612 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1613 }
1614
1615 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1616 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1616, __extension__ __PRETTY_FUNCTION__))
;
1617
1618 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1619 }
1620
1621 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1622 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1622, __extension__ __PRETTY_FUNCTION__))
;
1623
1624 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1625 }
1626
1627 void addSysCROperands(MCInst &Inst, unsigned N) const {
1628 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1628, __extension__ __PRETTY_FUNCTION__))
;
1629 Inst.addOperand(MCOperand::createImm(getSysCR()));
1630 }
1631
1632 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1633 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1633, __extension__ __PRETTY_FUNCTION__))
;
1634 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1635 }
1636
1637 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1638 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1638, __extension__ __PRETTY_FUNCTION__))
;
1639 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1640 }
1641
1642 void addShifterOperands(MCInst &Inst, unsigned N) const {
1643 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1643, __extension__ __PRETTY_FUNCTION__))
;
1644 unsigned Imm =
1645 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1646 Inst.addOperand(MCOperand::createImm(Imm));
1647 }
1648
1649 void addExtendOperands(MCInst &Inst, unsigned N) const {
1650 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1650, __extension__ __PRETTY_FUNCTION__))
;
1651 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1652 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1653 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1654 Inst.addOperand(MCOperand::createImm(Imm));
1655 }
1656
1657 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1658 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1658, __extension__ __PRETTY_FUNCTION__))
;
1659 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1660 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1661 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1662 Inst.addOperand(MCOperand::createImm(Imm));
1663 }
1664
1665 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1666 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1666, __extension__ __PRETTY_FUNCTION__))
;
1667 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1668 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1669 Inst.addOperand(MCOperand::createImm(IsSigned));
1670 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1671 }
1672
1673 // For 8-bit load/store instructions with a register offset, both the
1674 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1675 // they're disambiguated by whether the shift was explicit or implicit rather
1676 // than its size.
1677 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1678 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1678, __extension__ __PRETTY_FUNCTION__))
;
1679 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1680 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1681 Inst.addOperand(MCOperand::createImm(IsSigned));
1682 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1683 }
1684
1685 template<int Shift>
1686 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1687 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1687, __extension__ __PRETTY_FUNCTION__))
;
1688
1689 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1690 uint64_t Value = CE->getValue();
1691 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1692 }
1693
1694 template<int Shift>
1695 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1696 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1696, __extension__ __PRETTY_FUNCTION__))
;
1697
1698 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1699 uint64_t Value = CE->getValue();
1700 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1701 }
1702
1703 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1704 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1704, __extension__ __PRETTY_FUNCTION__))
;
1705 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1706 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1707 }
1708
1709 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1710 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1710, __extension__ __PRETTY_FUNCTION__))
;
1711 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1712 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1713 }
1714
1715 void print(raw_ostream &OS) const override;
1716
1717 static std::unique_ptr<AArch64Operand>
1718 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1719 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1720 Op->Tok.Data = Str.data();
1721 Op->Tok.Length = Str.size();
1722 Op->Tok.IsSuffix = IsSuffix;
1723 Op->StartLoc = S;
1724 Op->EndLoc = S;
1725 return Op;
1726 }
1727
1728 static std::unique_ptr<AArch64Operand>
1729 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1730 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1731 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1732 unsigned ShiftAmount = 0,
1733 unsigned HasExplicitAmount = false) {
1734 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1735 Op->Reg.RegNum = RegNum;
1736 Op->Reg.Kind = Kind;
1737 Op->Reg.ElementWidth = 0;
1738 Op->Reg.EqualityTy = EqTy;
1739 Op->Reg.ShiftExtend.Type = ExtTy;
1740 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1741 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1742 Op->StartLoc = S;
1743 Op->EndLoc = E;
1744 return Op;
1745 }
1746
1747 static std::unique_ptr<AArch64Operand>
1748 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1749 SMLoc S, SMLoc E, MCContext &Ctx,
1750 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1751 unsigned ShiftAmount = 0,
1752 unsigned HasExplicitAmount = false) {
1753 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1755, __extension__ __PRETTY_FUNCTION__))
1754 Kind == RegKind::SVEPredicateVector) &&(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1755, __extension__ __PRETTY_FUNCTION__))
1755 "Invalid vector kind")(static_cast <bool> ((Kind == RegKind::NeonVector || Kind
== RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector
) && "Invalid vector kind") ? void (0) : __assert_fail
("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1755, __extension__ __PRETTY_FUNCTION__))
;
1756 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1757 HasExplicitAmount);
1758 Op->Reg.ElementWidth = ElementWidth;
1759 return Op;
1760 }
1761
1762 static std::unique_ptr<AArch64Operand>
1763 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1764 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1765 MCContext &Ctx) {
1766 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1767 Op->VectorList.RegNum = RegNum;
1768 Op->VectorList.Count = Count;
1769 Op->VectorList.NumElements = NumElements;
1770 Op->VectorList.ElementWidth = ElementWidth;
1771 Op->VectorList.RegisterKind = RegisterKind;
1772 Op->StartLoc = S;
1773 Op->EndLoc = E;
1774 return Op;
1775 }
1776
1777 static std::unique_ptr<AArch64Operand>
1778 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1779 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1780 Op->VectorIndex.Val = Idx;
1781 Op->StartLoc = S;
1782 Op->EndLoc = E;
1783 return Op;
1784 }
1785
1786 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1787 SMLoc E, MCContext &Ctx) {
1788 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1789 Op->Imm.Val = Val;
1790 Op->StartLoc = S;
1791 Op->EndLoc = E;
1792 return Op;
1793 }
1794
1795 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1796 unsigned ShiftAmount,
1797 SMLoc S, SMLoc E,
1798 MCContext &Ctx) {
1799 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1800 Op->ShiftedImm .Val = Val;
1801 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1802 Op->StartLoc = S;
1803 Op->EndLoc = E;
1804 return Op;
1805 }
1806
1807 static std::unique_ptr<AArch64Operand>
1808 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1809 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1810 Op->CondCode.Code = Code;
1811 Op->StartLoc = S;
1812 Op->EndLoc = E;
1813 return Op;
1814 }
1815
1816 static std::unique_ptr<AArch64Operand>
1817 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1818 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1819 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1820 Op->FPImm.IsExact = IsExact;
1821 Op->StartLoc = S;
1822 Op->EndLoc = S;
1823 return Op;
1824 }
1825
1826 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1827 StringRef Str,
1828 SMLoc S,
1829 MCContext &Ctx) {
1830 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1831 Op->Barrier.Val = Val;
1832 Op->Barrier.Data = Str.data();
1833 Op->Barrier.Length = Str.size();
1834 Op->StartLoc = S;
1835 Op->EndLoc = S;
1836 return Op;
1837 }
1838
1839 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1840 uint32_t MRSReg,
1841 uint32_t MSRReg,
1842 uint32_t PStateField,
1843 MCContext &Ctx) {
1844 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1845 Op->SysReg.Data = Str.data();
1846 Op->SysReg.Length = Str.size();
1847 Op->SysReg.MRSReg = MRSReg;
1848 Op->SysReg.MSRReg = MSRReg;
1849 Op->SysReg.PStateField = PStateField;
1850 Op->StartLoc = S;
1851 Op->EndLoc = S;
1852 return Op;
1853 }
1854
1855 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1856 SMLoc E, MCContext &Ctx) {
1857 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1858 Op->SysCRImm.Val = Val;
1859 Op->StartLoc = S;
1860 Op->EndLoc = E;
1861 return Op;
1862 }
1863
1864 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1865 StringRef Str,
1866 SMLoc S,
1867 MCContext &Ctx) {
1868 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1869 Op->Prefetch.Val = Val;
1870 Op->Barrier.Data = Str.data();
1871 Op->Barrier.Length = Str.size();
1872 Op->StartLoc = S;
1873 Op->EndLoc = S;
1874 return Op;
1875 }
1876
1877 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1878 StringRef Str,
1879 SMLoc S,
1880 MCContext &Ctx) {
1881 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1882 Op->PSBHint.Val = Val;
1883 Op->PSBHint.Data = Str.data();
1884 Op->PSBHint.Length = Str.size();
1885 Op->StartLoc = S;
1886 Op->EndLoc = S;
1887 return Op;
1888 }
1889
1890 static std::unique_ptr<AArch64Operand>
1891 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1892 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1893 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1894 Op->ShiftExtend.Type = ShOp;
1895 Op->ShiftExtend.Amount = Val;
1896 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1897 Op->StartLoc = S;
1898 Op->EndLoc = E;
1899 return Op;
1900 }
1901};
1902
1903} // end anonymous namespace.
1904
1905void AArch64Operand::print(raw_ostream &OS) const {
1906 switch (Kind) {
1907 case k_FPImm:
1908 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
1909 if (!getFPImmIsExact())
1910 OS << " (inexact)";
1911 OS << ">";
1912 break;
1913 case k_Barrier: {
1914 StringRef Name = getBarrierName();
1915 if (!Name.empty())
1916 OS << "<barrier " << Name << ">";
1917 else
1918 OS << "<barrier invalid #" << getBarrier() << ">";
1919 break;
1920 }
1921 case k_Immediate:
1922 OS << *getImm();
1923 break;
1924 case k_ShiftedImm: {
1925 unsigned Shift = getShiftedImmShift();
1926 OS << "<shiftedimm ";
1927 OS << *getShiftedImmVal();
1928 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1929 break;
1930 }
1931 case k_CondCode:
1932 OS << "<condcode " << getCondCode() << ">";
1933 break;
1934 case k_VectorList: {
1935 OS << "<vectorlist ";
1936 unsigned Reg = getVectorListStart();
1937 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1938 OS << Reg + i << " ";
1939 OS << ">";
1940 break;
1941 }
1942 case k_VectorIndex:
1943 OS << "<vectorindex " << getVectorIndex() << ">";
1944 break;
1945 case k_SysReg:
1946 OS << "<sysreg: " << getSysReg() << '>';
1947 break;
1948 case k_Token:
1949 OS << "'" << getToken() << "'";
1950 break;
1951 case k_SysCR:
1952 OS << "c" << getSysCR();
1953 break;
1954 case k_Prefetch: {
1955 StringRef Name = getPrefetchName();
1956 if (!Name.empty())
1957 OS << "<prfop " << Name << ">";
1958 else
1959 OS << "<prfop invalid #" << getPrefetch() << ">";
1960 break;
1961 }
1962 case k_PSBHint:
1963 OS << getPSBHintName();
1964 break;
1965 case k_Register:
1966 OS << "<register " << getReg() << ">";
1967 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
1968 break;
1969 LLVM_FALLTHROUGH[[clang::fallthrough]];
1970 case k_ShiftExtend:
1971 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1972 << getShiftExtendAmount();
1973 if (!hasShiftExtendAmount())
1974 OS << "<imp>";
1975 OS << '>';
1976 break;
1977 }
1978}
1979
1980/// @name Auto-generated Match Functions
1981/// {
1982
1983static unsigned MatchRegisterName(StringRef Name);
1984
1985/// }
1986
1987static unsigned MatchNeonVectorRegName(StringRef Name) {
1988 return StringSwitch<unsigned>(Name.lower())
1989 .Case("v0", AArch64::Q0)
1990 .Case("v1", AArch64::Q1)
1991 .Case("v2", AArch64::Q2)
1992 .Case("v3", AArch64::Q3)
1993 .Case("v4", AArch64::Q4)
1994 .Case("v5", AArch64::Q5)
1995 .Case("v6", AArch64::Q6)
1996 .Case("v7", AArch64::Q7)
1997 .Case("v8", AArch64::Q8)
1998 .Case("v9", AArch64::Q9)
1999 .Case("v10", AArch64::Q10)
2000 .Case("v11", AArch64::Q11)
2001 .Case("v12", AArch64::Q12)
2002 .Case("v13", AArch64::Q13)
2003 .Case("v14", AArch64::Q14)
2004 .Case("v15", AArch64::Q15)
2005 .Case("v16", AArch64::Q16)
2006 .Case("v17", AArch64::Q17)
2007 .Case("v18", AArch64::Q18)
2008 .Case("v19", AArch64::Q19)
2009 .Case("v20", AArch64::Q20)
2010 .Case("v21", AArch64::Q21)
2011 .Case("v22", AArch64::Q22)
2012 .Case("v23", AArch64::Q23)
2013 .Case("v24", AArch64::Q24)
2014 .Case("v25", AArch64::Q25)
2015 .Case("v26", AArch64::Q26)
2016 .Case("v27", AArch64::Q27)
2017 .Case("v28", AArch64::Q28)
2018 .Case("v29", AArch64::Q29)
2019 .Case("v30", AArch64::Q30)
2020 .Case("v31", AArch64::Q31)
2021 .Default(0);
2022}
2023
2024/// Returns an optional pair of (#elements, element-width) if Suffix
2025/// is a valid vector kind. Where the number of elements in a vector
2026/// or the vector width is implicit or explicitly unknown (but still a
2027/// valid suffix kind), 0 is used.
2028static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2029 RegKind VectorKind) {
2030 std::pair<int, int> Res = {-1, -1};
2031
2032 switch (VectorKind) {
2033 case RegKind::NeonVector:
2034 Res =
2035 StringSwitch<std::pair<int, int>>(Suffix.lower())
2036 .Case("", {0, 0})
2037 .Case(".1d", {1, 64})
2038 .Case(".1q", {1, 128})
2039 // '.2h' needed for fp16 scalar pairwise reductions
2040 .Case(".2h", {2, 16})
2041 .Case(".2s", {2, 32})
2042 .Case(".2d", {2, 64})
2043 // '.4b' is another special case for the ARMv8.2a dot product
2044 // operand
2045 .Case(".4b", {4, 8})
2046 .Case(".4h", {4, 16})
2047 .Case(".4s", {4, 32})
2048 .Case(".8b", {8, 8})
2049 .Case(".8h", {8, 16})
2050 .Case(".16b", {16, 8})
2051 // Accept the width neutral ones, too, for verbose syntax. If those
2052 // aren't used in the right places, the token operand won't match so
2053 // all will work out.
2054 .Case(".b", {0, 8})
2055 .Case(".h", {0, 16})
2056 .Case(".s", {0, 32})
2057 .Case(".d", {0, 64})
2058 .Default({-1, -1});
2059 break;
2060 case RegKind::SVEPredicateVector:
2061 case RegKind::SVEDataVector:
2062 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2063 .Case("", {0, 0})
2064 .Case(".b", {0, 8})
2065 .Case(".h", {0, 16})
2066 .Case(".s", {0, 32})
2067 .Case(".d", {0, 64})
2068 .Case(".q", {0, 128})
2069 .Default({-1, -1});
2070 break;
2071 default:
2072 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2072)
;
2073 }
2074
2075 if (Res == std::make_pair(-1, -1))
2076 return Optional<std::pair<int, int>>();
2077
2078 return Optional<std::pair<int, int>>(Res);
2079}
2080
2081static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2082 return parseVectorKind(Suffix, VectorKind).hasValue();
2083}
2084
2085static unsigned matchSVEDataVectorRegName(StringRef Name) {
2086 return StringSwitch<unsigned>(Name.lower())
2087 .Case("z0", AArch64::Z0)
2088 .Case("z1", AArch64::Z1)
2089 .Case("z2", AArch64::Z2)
2090 .Case("z3", AArch64::Z3)
2091 .Case("z4", AArch64::Z4)
2092 .Case("z5", AArch64::Z5)
2093 .Case("z6", AArch64::Z6)
2094 .Case("z7", AArch64::Z7)
2095 .Case("z8", AArch64::Z8)
2096 .Case("z9", AArch64::Z9)
2097 .Case("z10", AArch64::Z10)
2098 .Case("z11", AArch64::Z11)
2099 .Case("z12", AArch64::Z12)
2100 .Case("z13", AArch64::Z13)
2101 .Case("z14", AArch64::Z14)
2102 .Case("z15", AArch64::Z15)
2103 .Case("z16", AArch64::Z16)
2104 .Case("z17", AArch64::Z17)
2105 .Case("z18", AArch64::Z18)
2106 .Case("z19", AArch64::Z19)
2107 .Case("z20", AArch64::Z20)
2108 .Case("z21", AArch64::Z21)
2109 .Case("z22", AArch64::Z22)
2110 .Case("z23", AArch64::Z23)
2111 .Case("z24", AArch64::Z24)
2112 .Case("z25", AArch64::Z25)
2113 .Case("z26", AArch64::Z26)
2114 .Case("z27", AArch64::Z27)
2115 .Case("z28", AArch64::Z28)
2116 .Case("z29", AArch64::Z29)
2117 .Case("z30", AArch64::Z30)
2118 .Case("z31", AArch64::Z31)
2119 .Default(0);
2120}
2121
2122static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2123 return StringSwitch<unsigned>(Name.lower())
2124 .Case("p0", AArch64::P0)
2125 .Case("p1", AArch64::P1)
2126 .Case("p2", AArch64::P2)
2127 .Case("p3", AArch64::P3)
2128 .Case("p4", AArch64::P4)
2129 .Case("p5", AArch64::P5)
2130 .Case("p6", AArch64::P6)
2131 .Case("p7", AArch64::P7)
2132 .Case("p8", AArch64::P8)
2133 .Case("p9", AArch64::P9)
2134 .Case("p10", AArch64::P10)
2135 .Case("p11", AArch64::P11)
2136 .Case("p12", AArch64::P12)
2137 .Case("p13", AArch64::P13)
2138 .Case("p14", AArch64::P14)
2139 .Case("p15", AArch64::P15)
2140 .Default(0);
2141}
2142
2143bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2144 SMLoc &EndLoc) {
2145 StartLoc = getLoc();
2146 auto Res = tryParseScalarRegister(RegNo);
2147 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2148 return Res != MatchOperand_Success;
2149}
2150
2151// Matches a register name or register alias previously defined by '.req'
2152unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2153 RegKind Kind) {
2154 unsigned RegNum = 0;
2155 if ((RegNum = matchSVEDataVectorRegName(Name)))
2156 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2157
2158 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2159 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2160
2161 if ((RegNum = MatchNeonVectorRegName(Name)))
2162 return Kind == RegKind::NeonVector ? RegNum : 0;
2163
2164 // The parsed register must be of RegKind Scalar
2165 if ((RegNum = MatchRegisterName(Name)))
2166 return Kind == RegKind::Scalar ? RegNum : 0;
2167
2168 if (!RegNum) {
2169 // Handle a few common aliases of registers.
2170 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2171 .Case("fp", AArch64::FP)
2172 .Case("lr", AArch64::LR)
2173 .Case("x31", AArch64::XZR)
2174 .Case("w31", AArch64::WZR)
2175 .Default(0))
2176 return Kind == RegKind::Scalar ? RegNum : 0;
2177
2178 // Check for aliases registered via .req. Canonicalize to lower case.
2179 // That's more consistent since register names are case insensitive, and
2180 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2181 auto Entry = RegisterReqs.find(Name.lower());
2182 if (Entry == RegisterReqs.end())
2183 return 0;
2184
2185 // set RegNum if the match is the right kind of register
2186 if (Kind == Entry->getValue().first)
2187 RegNum = Entry->getValue().second;
2188 }
2189 return RegNum;
2190}
2191
/// tryParseScalarRegister - Try to parse a scalar register name. The token
/// must be an Identifier when called; if it is a register name, the token is
/// eaten and the register number is returned via \p RegNum.
OperandMatchResultTy
AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Register names are case-insensitive; canonicalize before matching.
  std::string lowerCase = Tok.getString().lower();
  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
  if (Reg == 0)
    return MatchOperand_NoMatch;

  RegNum = Reg;
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2211
2212/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2213OperandMatchResultTy
2214AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2215 MCAsmParser &Parser = getParser();
2216 SMLoc S = getLoc();
2217
2218 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2219 Error(S, "Expected cN operand where 0 <= N <= 15");
2220 return MatchOperand_ParseFail;
2221 }
2222
2223 StringRef Tok = Parser.getTok().getIdentifier();
2224 if (Tok[0] != 'c' && Tok[0] != 'C') {
2225 Error(S, "Expected cN operand where 0 <= N <= 15");
2226 return MatchOperand_ParseFail;
2227 }
2228
2229 uint32_t CRNum;
2230 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2231 if (BadNum || CRNum > 15) {
2232 Error(S, "Expected cN operand where 0 <= N <= 15");
2233 return MatchOperand_ParseFail;
2234 }
2235
2236 Parser.Lex(); // Eat identifier token.
2237 Operands.push_back(
2238 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2239 return MatchOperand_Success;
2240}
2241
/// tryParsePrefetch - Try to parse a prefetch operand.
/// \tparam IsSVEPrefetch  selects the SVE PRFM name table and a 4-bit
///         immediate range instead of the base 5-bit one.
/// Accepts either a named hint or a ('#'-prefixed or bare) integer
/// immediate, and pushes a prefetch operand on success.
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Resolve a hint name to its encoding via the table for this variant.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Reverse mapping: encoding back to its canonical name, if it has one.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only constant expressions make sense as a prefetch operation number.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Attach the canonical name (when one exists) so the operand prints
    // symbolically.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  // Otherwise the operand must be a named hint.
  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  return MatchOperand_Success;
}
2311
2312/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2313OperandMatchResultTy
2314AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2315 MCAsmParser &Parser = getParser();
2316 SMLoc S = getLoc();
2317 const AsmToken &Tok = Parser.getTok();
2318 if (Tok.isNot(AsmToken::Identifier)) {
2319 TokError("invalid operand for instruction");
2320 return MatchOperand_ParseFail;
2321 }
2322
2323 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2324 if (!PSB) {
2325 TokError("invalid operand for instruction");
2326 return MatchOperand_ParseFail;
2327 }
2328
2329 Parser.Lex(); // Eat identifier token.
2330 Operands.push_back(AArch64Operand::CreatePSBHint(
2331 PSB->Encoding, Tok.getString(), S, getContext()));
2332 return MatchOperand_Success;
2333}
2334
2335/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2336/// instruction.
2337OperandMatchResultTy
2338AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2339 MCAsmParser &Parser = getParser();
2340 SMLoc S = getLoc();
2341 const MCExpr *Expr;
1
'Expr' declared without an initial value
2342
2343 if (Parser.getTok().is(AsmToken::Hash)) {
2
Taking false branch
2344 Parser.Lex(); // Eat hash token.
2345 }
2346
2347 if (parseSymbolicImmVal(Expr))
3
Calling 'AArch64AsmParser::parseSymbolicImmVal'
8
Returning from 'AArch64AsmParser::parseSymbolicImmVal'
9
Assuming the condition is false
10
Taking false branch
2348 return MatchOperand_ParseFail;
2349
2350 AArch64MCExpr::VariantKind ELFRefKind;
2351 MCSymbolRefExpr::VariantKind DarwinRefKind;
2352 int64_t Addend;
2353 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
11
1st function call argument is an uninitialized value
2354 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2355 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2356 // No modifier was specified at all; this is the syntax for an ELF basic
2357 // ADRP relocation (unfortunately).
2358 Expr =
2359 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2360 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2361 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2362 Addend != 0) {
2363 Error(S, "gotpage label reference not allowed an addend");
2364 return MatchOperand_ParseFail;
2365 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2366 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2367 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2368 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2369 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2370 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2371 // The operand must be an @page or @gotpage qualified symbolref.
2372 Error(S, "page or gotpage label reference expected");
2373 return MatchOperand_ParseFail;
2374 }
2375 }
2376
2377 // We have either a label reference possibly with addend or an immediate. The
2378 // addend is a raw value here. The linker will adjust it to only reference the
2379 // page.
2380 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2381 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2382
2383 return MatchOperand_Success;
2384}
2385
2386/// tryParseAdrLabel - Parse and validate a source label for the ADR
2387/// instruction.
2388OperandMatchResultTy
2389AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2390 SMLoc S = getLoc();
2391 const MCExpr *Expr;
2392
2393 const AsmToken &Tok = getParser().getTok();
2394 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2395 if (getParser().parseExpression(Expr))
2396 return MatchOperand_ParseFail;
2397
2398 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2399 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2400
2401 return MatchOperand_Success;
2402 }
2403 return MatchOperand_NoMatch;
2404}
2405
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts an optional '#' and '-' prefix, then either a hexadecimal
/// encoded immediate ("0x..") or a decimal/real literal. When
/// \p AddFPZeroAsLiteral is set, +0.0 is pushed as the literal tokens
/// "#0" ".0" rather than as an FP-immediate operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may simply be a different operand kind,
    // so report NoMatch instead of an error.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The encoded form is an 8-bit value; a sign would be meaningless here.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto Status =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      // The second argument records whether the literal was exactly
      // representable (Status == opOK).
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, Status == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2458
2459/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2460/// a shift suffix, for example '#1, lsl #12'.
2461OperandMatchResultTy
2462AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2463 MCAsmParser &Parser = getParser();
2464 SMLoc S = getLoc();
2465
2466 if (Parser.getTok().is(AsmToken::Hash))
2467 Parser.Lex(); // Eat '#'
2468 else if (Parser.getTok().isNot(AsmToken::Integer))
2469 // Operand should start from # or should be integer, emit error otherwise.
2470 return MatchOperand_NoMatch;
2471
2472 const MCExpr *Imm;
2473 if (parseSymbolicImmVal(Imm))
2474 return MatchOperand_ParseFail;
2475 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2476 SMLoc E = Parser.getTok().getLoc();
2477 Operands.push_back(
2478 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2479 return MatchOperand_Success;
2480 }
2481
2482 // Eat ','
2483 Parser.Lex();
2484
2485 // The optional operand must be "lsl #N" where N is non-negative.
2486 if (!Parser.getTok().is(AsmToken::Identifier) ||
2487 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2488 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2489 return MatchOperand_ParseFail;
2490 }
2491
2492 // Eat 'lsl'
2493 Parser.Lex();
2494
2495 parseOptionalToken(AsmToken::Hash);
2496
2497 if (Parser.getTok().isNot(AsmToken::Integer)) {
2498 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2499 return MatchOperand_ParseFail;
2500 }
2501
2502 int64_t ShiftAmount = Parser.getTok().getIntVal();
2503
2504 if (ShiftAmount < 0) {
2505 Error(Parser.getTok().getLoc(), "positive shift amount required");
2506 return MatchOperand_ParseFail;
2507 }
2508 Parser.Lex(); // Eat the number
2509
2510 // Just in case the optional lsl #0 is used for immediates other than zero.
2511 if (ShiftAmount == 0 && Imm != 0) {
2512 SMLoc E = Parser.getTok().getLoc();
2513 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2514 return MatchOperand_Success;
2515 }
2516
2517 SMLoc E = Parser.getTok().getLoc();
2518 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2519 S, E, getContext()));
2520 return MatchOperand_Success;
2521}
2522
2523/// parseCondCodeString - Parse a Condition Code string.
2524AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2525 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2526 .Case("eq", AArch64CC::EQ)
2527 .Case("ne", AArch64CC::NE)
2528 .Case("cs", AArch64CC::HS)
2529 .Case("hs", AArch64CC::HS)
2530 .Case("cc", AArch64CC::LO)
2531 .Case("lo", AArch64CC::LO)
2532 .Case("mi", AArch64CC::MI)
2533 .Case("pl", AArch64CC::PL)
2534 .Case("vs", AArch64CC::VS)
2535 .Case("vc", AArch64CC::VC)
2536 .Case("hi", AArch64CC::HI)
2537 .Case("ls", AArch64CC::LS)
2538 .Case("ge", AArch64CC::GE)
2539 .Case("lt", AArch64CC::LT)
2540 .Case("gt", AArch64CC::GT)
2541 .Case("le", AArch64CC::LE)
2542 .Case("al", AArch64CC::AL)
2543 .Case("nv", AArch64CC::NV)
2544 .Default(AArch64CC::Invalid);
2545
2546 if (CC == AArch64CC::Invalid &&
2547 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2548 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2549 .Case("none", AArch64CC::EQ)
2550 .Case("any", AArch64CC::NE)
2551 .Case("nlast", AArch64CC::HS)
2552 .Case("last", AArch64CC::LO)
2553 .Case("first", AArch64CC::MI)
2554 .Case("nfrst", AArch64CC::PL)
2555 .Case("pmore", AArch64CC::HI)
2556 .Case("plast", AArch64CC::LS)
2557 .Case("tcont", AArch64CC::GE)
2558 .Case("tstop", AArch64CC::LT)
2559 .Default(AArch64CC::Invalid);
2560
2561 return CC;
2562}
2563
2564/// parseCondCode - Parse a Condition Code operand.
2565bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2566 bool invertCondCode) {
2567 MCAsmParser &Parser = getParser();
2568 SMLoc S = getLoc();
2569 const AsmToken &Tok = Parser.getTok();
2570 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")(static_cast <bool> (Tok.is(AsmToken::Identifier) &&
"Token is not an Identifier") ? void (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2570, __extension__ __PRETTY_FUNCTION__))
;
2571
2572 StringRef Cond = Tok.getString();
2573 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2574 if (CC == AArch64CC::Invalid)
2575 return TokError("invalid condition code");
2576 Parser.Lex(); // Eat identifier token.
2577
2578 if (invertCondCode) {
2579 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2580 return TokError("condition codes AL and NV are invalid for this instruction");
2581 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2582 }
2583
2584 Operands.push_back(
2585 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2586 return false;
2587}
2588
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // Shift/extend mnemonics are case insensitive.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // Shift operations always require an amount; only the "extend" family
    // may omit it.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The shift amount must fold to a constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2662
2663static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2664 if (FBS[AArch64::HasV8_1aOps])
2665 Str += "ARMv8.1a";
2666 else if (FBS[AArch64::HasV8_2aOps])
2667 Str += "ARMv8.2a";
2668 else if (FBS[AArch64::HasV8_3aOps])
2669 Str += "ARMv8.3a";
2670 else if (FBS[AArch64::HasV8_4aOps])
2671 Str += "ARMv8.4a";
2672 else
2673 Str += "(unknown)";
2674}
2675
2676void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2677 SMLoc S) {
2678 const uint16_t Op2 = Encoding & 7;
2679 const uint16_t Cm = (Encoding & 0x78) >> 3;
2680 const uint16_t Cn = (Encoding & 0x780) >> 7;
2681 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2682
2683 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2684
2685 Operands.push_back(
2686 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2687 Operands.push_back(
2688 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2689 Operands.push_back(
2690 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2691 Expr = MCConstantExpr::create(Op2, getContext());
2692 Operands.push_back(
2693 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2694}
2695
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // The alias always lowers to a "sys" instruction token.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Each alias family has its own operand-name table; look the operand up,
  // check the required subtarget features, then expand the packed encoding
  // into explicit SYS operands.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Operations on "all" entries take no register; everything else does.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
2776
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DSB/DMB/ISB/TSB): either a 4-bit immediate or a named barrier option.
/// ISB accepts only the name "sy"; TSB accepts only "csync".
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
    // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // Barrier immediates are a 4-bit field.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical name, if the encoding has one, for printing.
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
    // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option
  return MatchOperand_Success;
}
2833
/// tryParseSysReg - Parse a system-register operand for MRS/MSR and the
/// MSR-immediate (PState) form. Records separate read/write encodings
/// since some registers are readable or writeable only.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // -1 marks "not usable in this direction".
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Not a named register: try the generic "s<op0>_<op1>_..." spelling.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  // Separately, the name may denote a PState field for the MSR-immediate form.
  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
2862
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true on failure (note: inverted sense relative to the
/// OperandMatchResultTy-returning parsers).
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Decode the ".<n><w>" suffix into an element width for the operand.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // A lane index ("[n]") may follow; only a malformed index is an error.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
2895
2896OperandMatchResultTy
2897AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
2898 SMLoc SIdx = getLoc();
2899 if (parseOptionalToken(AsmToken::LBrac)) {
2900 const MCExpr *ImmVal;
2901 if (getParser().parseExpression(ImmVal))
2902 return MatchOperand_NoMatch;
2903 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2904 if (!MCE) {
2905 TokError("immediate value expected for vector index");
2906 return MatchOperand_ParseFail;;
2907 }
2908
2909 SMLoc E = getLoc();
2910
2911 if (parseToken(AsmToken::RBrac, "']' expected"))
2912 return MatchOperand_ParseFail;;
2913
2914 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2915 E, getContext()));
2916 return MatchOperand_Success;
2917 }
2918
2919 return MatchOperand_NoMatch;
2920}
2921
2922// tryParseVectorRegister - Try to parse a vector register name with
2923// optional kind specifier. If it is a register specifier, eat the token
2924// and return it.
2925OperandMatchResultTy
2926AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
2927 RegKind MatchKind) {
2928 MCAsmParser &Parser = getParser();
2929 const AsmToken &Tok = Parser.getTok();
2930
2931 if (Tok.isNot(AsmToken::Identifier))
2932 return MatchOperand_NoMatch;
2933
2934 StringRef Name = Tok.getString();
2935 // If there is a kind specifier, it's separated from the register name by
2936 // a '.'.
2937 size_t Start = 0, Next = Name.find('.');
2938 StringRef Head = Name.slice(Start, Next);
2939 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
2940
2941 if (RegNum) {
2942 if (Next != StringRef::npos) {
2943 Kind = Name.slice(Next, StringRef::npos);
2944 if (!isValidVectorKind(Kind, MatchKind)) {
2945 TokError("invalid vector kind qualifier");
2946 return MatchOperand_ParseFail;
2947 }
2948 }
2949 Parser.Lex(); // Eat the register token.
2950
2951 Reg = RegNum;
2952 return MatchOperand_Success;
2953 }
2954
2955 return MatchOperand_NoMatch;
2956}
2957
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by a "/m" (merging) or "/z" (zeroing) qualifier.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand.
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3010
3011/// parseRegister - Parse a register operand.
3012bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3013 // Try for a Neon vector register.
3014 if (!tryParseNeonVectorRegister(Operands))
3015 return false;
3016
3017 // Otherwise try for a scalar register.
3018 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3019 return false;
3020
3021 return true;
3022}
3023
3024bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3025 MCAsmParser &Parser = getParser();
3026 bool HasELFModifier = false;
3027 AArch64MCExpr::VariantKind RefKind;
3028
3029 if (parseOptionalToken(AsmToken::Colon)) {
4
Assuming the condition is true
5
Taking true branch
3030 HasELFModifier = true;
3031
3032 if (Parser.getTok().isNot(AsmToken::Identifier))
6
Taking true branch
3033 return TokError("expect relocation specifier in operand after ':'");
7
Returning without writing to 'ImmVal'
3034
3035 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3036 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3037 .Case("lo12", AArch64MCExpr::VK_LO12)
3038 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3039 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3040 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3041 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3042 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3043 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3044 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3045 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3046 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3047 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3048 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3049 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3050 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3051 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3052 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3053 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3054 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3055 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3056 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3057 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3058 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3059 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3060 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3061 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3062 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3063 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3064 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3065 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3066 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3067 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3068 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3069 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3070 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3071 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3072 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3073 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3074 .Default(AArch64MCExpr::VK_INVALID);
3075
3076 if (RefKind == AArch64MCExpr::VK_INVALID)
3077 return TokError("expect relocation specifier in operand after ':'");
3078
3079 Parser.Lex(); // Eat identifier
3080
3081 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3082 return true;
3083 }
3084
3085 if (getParser().parseExpression(ImmVal))
3086 return true;
3087
3088 if (HasELFModifier)
3089 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3090
3091 return false;
3092}
3093
3094template <RegKind VectorKind>
3095OperandMatchResultTy
3096AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3097 bool ExpectMatch) {
3098 MCAsmParser &Parser = getParser();
3099 if (!Parser.getTok().is(AsmToken::LCurly))
3100 return MatchOperand_NoMatch;
3101
3102 // Wrapper around parse function
3103 auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3104 bool NoMatchIsError) {
3105 auto RegTok = Parser.getTok();
3106 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3107 if (ParseRes == MatchOperand_Success) {
3108 if (parseVectorKind(Kind, VectorKind))
3109 return ParseRes;
3110 llvm_unreachable("Expected a valid vector kind")::llvm::llvm_unreachable_internal("Expected a valid vector kind"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3110)
;
3111 }
3112
3113 if (RegTok.isNot(AsmToken::Identifier) ||
3114 ParseRes == MatchOperand_ParseFail ||
3115 (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3116 Error(Loc, "vector register expected");
3117 return MatchOperand_ParseFail;
3118 }
3119
3120 return MatchOperand_NoMatch;
3121 };
3122
3123 SMLoc S = getLoc();
3124 auto LCurly = Parser.getTok();
3125 Parser.Lex(); // Eat left bracket token.
3126
3127 StringRef Kind;
3128 unsigned FirstReg;
3129 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3130
3131 // Put back the original left bracket if there was no match, so that
3132 // different types of list-operands can be matched (e.g. SVE, Neon).
3133 if (ParseRes == MatchOperand_NoMatch)
3134 Parser.getLexer().UnLex(LCurly);
3135
3136 if (ParseRes != MatchOperand_Success)
3137 return ParseRes;
3138
3139 int64_t PrevReg = FirstReg;
3140 unsigned Count = 1;
3141
3142 if (parseOptionalToken(AsmToken::Minus)) {
3143 SMLoc Loc = getLoc();
3144 StringRef NextKind;
3145
3146 unsigned Reg;
3147 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3148 if (ParseRes != MatchOperand_Success)
3149 return ParseRes;
3150
3151 // Any Kind suffices must match on all regs in the list.
3152 if (Kind != NextKind) {
3153 Error(Loc, "mismatched register size suffix");
3154 return MatchOperand_ParseFail;
3155 }
3156
3157 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3158
3159 if (Space == 0 || Space > 3) {
3160 Error(Loc, "invalid number of vectors");
3161 return MatchOperand_ParseFail;
3162 }
3163
3164 Count += Space;
3165 }
3166 else {
3167 while (parseOptionalToken(AsmToken::Comma)) {
3168 SMLoc Loc = getLoc();
3169 StringRef NextKind;
3170 unsigned Reg;
3171 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3172 if (ParseRes != MatchOperand_Success)
3173 return ParseRes;
3174
3175 // Any Kind suffices must match on all regs in the list.
3176 if (Kind != NextKind) {
3177 Error(Loc, "mismatched register size suffix");
3178 return MatchOperand_ParseFail;
3179 }
3180
3181 // Registers must be incremental (with wraparound at 31)
3182 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3183 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3184 Error(Loc, "registers must be sequential");
3185 return MatchOperand_ParseFail;
3186 }
3187
3188 PrevReg = Reg;
3189 ++Count;
3190 }
3191 }
3192
3193 if (parseToken(AsmToken::RCurly, "'}' expected"))
3194 return MatchOperand_ParseFail;
3195
3196 if (Count > 4) {
3197 Error(S, "invalid number of vectors");
3198 return MatchOperand_ParseFail;
3199 }
3200
3201 unsigned NumElements = 0;
3202 unsigned ElementWidth = 0;
3203 if (!Kind.empty()) {
3204 if (const auto &VK = parseVectorKind(Kind, VectorKind))
3205 std::tie(NumElements, ElementWidth) = *VK;
3206 }
3207
3208 Operands.push_back(AArch64Operand::CreateVectorList(
3209 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3210 getContext()));
3211
3212 return MatchOperand_Success;
3213}
3214
3215/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3216bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3217 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3218 if (ParseRes != MatchOperand_Success)
3219 return true;
3220
3221 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3222}
3223
/// Parse a GPR64sp register optionally followed by ", #0" (e.g. "sp" or
/// "x1, #0"). Any trailing index other than a literal zero is rejected.
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No comma: the bare register is the whole operand.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // The '#' before the immediate is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getParser().getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  // Only a constant expression evaluating to exactly zero is accepted.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
3257
/// Parse a scalar GPR operand, optionally (when \p ParseShiftExtend) with a
/// trailing shift/extend such as ", lsl #2". The shift/extend is folded into
/// the register operand rather than pushed separately. \p EqTy records the
/// register-equality constraint used later by regsEqual().
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Merge the parsed shift/extend into the register operand itself.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3292
3293bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3294 MCAsmParser &Parser = getParser();
3295
3296 // Some SVE instructions have a decoration after the immediate, i.e.
3297 // "mul vl". We parse them here and add tokens, which must be present in the
3298 // asm string in the tablegen instruction.
3299 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3300 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3301 if (!Parser.getTok().getString().equals_lower("mul") ||
3302 !(NextIsVL || NextIsHash))
3303 return true;
3304
3305 Operands.push_back(
3306 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3307 Parser.Lex(); // Eat the "mul"
3308
3309 if (NextIsVL) {
3310 Operands.push_back(
3311 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3312 Parser.Lex(); // Eat the "vl"
3313 return false;
3314 }
3315
3316 if (NextIsHash) {
3317 Parser.Lex(); // Eat the #
3318 SMLoc S = getLoc();
3319
3320 // Parse immediate operand.
3321 const MCExpr *ImmVal;
3322 if (!Parser.parseExpression(ImmVal))
3323 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3324 Operands.push_back(AArch64Operand::CreateImm(
3325 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3326 getContext()));
3327 return MatchOperand_Success;
3328 }
3329 }
3330
3331 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3332}
3333
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
///
/// Returns true on error (bool-as-error convention). \p isCondCode forces
/// the operand to be parsed as a condition code; \p invertCondCode is
/// forwarded to parseCondCode for aliased conditional-select mnemonics.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocated) immediate
    // expression. Note: this S shadows the outer declaration above.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // NOTE(review): GotShift (an enum) is returned from a bool function
    // here; it relies on MatchOperand_Success converting to false — confirm
    // this is intentional for the ParseFail value too.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr r0, =<expr>" pseudo: materialize via movz or a constant pool.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    // NOTE(review): S is never assigned in this case, so the immediates
    // below get a default-constructed (invalid) start loc — confirm intended.
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize: shift out whole low 16-bit zero chunks so Imm fits movz.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                    ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3505
3506bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3507 const MCParsedAsmOperand &Op2) const {
3508 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3509 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3510 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3511 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3512 return MCTargetAsmParser::regsEqual(Op1, Op2);
3513
3514 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3515, __extension__ __PRETTY_FUNCTION__))
3515 "Testing equality of non-scalar registers not supported")(static_cast <bool> (AOp1.isScalarReg() && AOp2
.isScalarReg() && "Testing equality of non-scalar registers not supported"
) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3515, __extension__ __PRETTY_FUNCTION__))
;
3516
3517 // Check if a registers match their sub/super register classes.
3518 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3519 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3520 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3521 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3522 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3523 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3524 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3525 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3526
3527 return false;
3528}
3529
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Returns true on error. Handles the ".req" directive, SYS aliases
/// (ic/dc/at/tlbi), dotted conditional-branch mnemonics, and the
/// condition-code operand positions of the conditional compare/select
/// mnemonic families.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize shorthand branch mnemonics ("beq") to the dotted
  // "b.<cc>" spelling before splitting on '.'.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      return true;
    }

    unsigned N = 2;
    while (parseOptionalToken(AsmToken::Comma)) {
      // Parse and remember the operand. The operand index N selects whether
      // this position holds a condition code for the mnemonic families above.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    }
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3667
3668// FIXME: This entire function is a giant hack to provide us with decent
3669// operand range validation/diagnostics until TableGen/MC can be extended
3670// to support autogeneration of this kind of validation.
3671bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3672 SmallVectorImpl<SMLoc> &Loc) {
3673 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3674 // Check for indexed addressing modes w/ the base register being the
3675 // same as a destination/source register or pair load where
3676 // the Rt == Rt2. All of those are undefined behaviour.
3677 switch (Inst.getOpcode()) {
3678 case AArch64::LDPSWpre:
3679 case AArch64::LDPWpost:
3680 case AArch64::LDPWpre:
3681 case AArch64::LDPXpost:
3682 case AArch64::LDPXpre: {
3683 unsigned Rt = Inst.getOperand(1).getReg();
3684 unsigned Rt2 = Inst.getOperand(2).getReg();
3685 unsigned Rn = Inst.getOperand(3).getReg();
3686 if (RI->isSubRegisterEq(Rn, Rt))
3687 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3688 "is also a destination");
3689 if (RI->isSubRegisterEq(Rn, Rt2))
3690 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3691 "is also a destination");
3692 LLVM_FALLTHROUGH[[clang::fallthrough]];
3693 }
3694 case AArch64::LDPDi:
3695 case AArch64::LDPQi:
3696 case AArch64::LDPSi:
3697 case AArch64::LDPSWi:
3698 case AArch64::LDPWi:
3699 case AArch64::LDPXi: {
3700 unsigned Rt = Inst.getOperand(0).getReg();
3701 unsigned Rt2 = Inst.getOperand(1).getReg();
3702 if (Rt == Rt2)
3703 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3704 break;
3705 }
3706 case AArch64::LDPDpost:
3707 case AArch64::LDPDpre:
3708 case AArch64::LDPQpost:
3709 case AArch64::LDPQpre:
3710 case AArch64::LDPSpost:
3711 case AArch64::LDPSpre:
3712 case AArch64::LDPSWpost: {
3713 unsigned Rt = Inst.getOperand(1).getReg();
3714 unsigned Rt2 = Inst.getOperand(2).getReg();
3715 if (Rt == Rt2)
3716 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3717 break;
3718 }
3719 case AArch64::STPDpost:
3720 case AArch64::STPDpre:
3721 case AArch64::STPQpost:
3722 case AArch64::STPQpre:
3723 case AArch64::STPSpost:
3724 case AArch64::STPSpre:
3725 case AArch64::STPWpost:
3726 case AArch64::STPWpre:
3727 case AArch64::STPXpost:
3728 case AArch64::STPXpre: {
3729 unsigned Rt = Inst.getOperand(1).getReg();
3730 unsigned Rt2 = Inst.getOperand(2).getReg();
3731 unsigned Rn = Inst.getOperand(3).getReg();
3732 if (RI->isSubRegisterEq(Rn, Rt))
3733 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3734 "is also a source");
3735 if (RI->isSubRegisterEq(Rn, Rt2))
3736 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3737 "is also a source");
3738 break;
3739 }
3740 case AArch64::LDRBBpre:
3741 case AArch64::LDRBpre:
3742 case AArch64::LDRHHpre:
3743 case AArch64::LDRHpre:
3744 case AArch64::LDRSBWpre:
3745 case AArch64::LDRSBXpre:
3746 case AArch64::LDRSHWpre:
3747 case AArch64::LDRSHXpre:
3748 case AArch64::LDRSWpre:
3749 case AArch64::LDRWpre:
3750 case AArch64::LDRXpre:
3751 case AArch64::LDRBBpost:
3752 case AArch64::LDRBpost:
3753 case AArch64::LDRHHpost:
3754 case AArch64::LDRHpost:
3755 case AArch64::LDRSBWpost:
3756 case AArch64::LDRSBXpost:
3757 case AArch64::LDRSHWpost:
3758 case AArch64::LDRSHXpost:
3759 case AArch64::LDRSWpost:
3760 case AArch64::LDRWpost:
3761 case AArch64::LDRXpost: {
3762 unsigned Rt = Inst.getOperand(1).getReg();
3763 unsigned Rn = Inst.getOperand(2).getReg();
3764 if (RI->isSubRegisterEq(Rn, Rt))
3765 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3766 "is also a source");
3767 break;
3768 }
3769 case AArch64::STRBBpost:
3770 case AArch64::STRBpost:
3771 case AArch64::STRHHpost:
3772 case AArch64::STRHpost:
3773 case AArch64::STRWpost:
3774 case AArch64::STRXpost:
3775 case AArch64::STRBBpre:
3776 case AArch64::STRBpre:
3777 case AArch64::STRHHpre:
3778 case AArch64::STRHpre:
3779 case AArch64::STRWpre:
3780 case AArch64::STRXpre: {
3781 unsigned Rt = Inst.getOperand(1).getReg();
3782 unsigned Rn = Inst.getOperand(2).getReg();
3783 if (RI->isSubRegisterEq(Rn, Rt))
3784 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3785 "is also a source");
3786 break;
3787 }
3788 case AArch64::STXRB:
3789 case AArch64::STXRH:
3790 case AArch64::STXRW:
3791 case AArch64::STXRX:
3792 case AArch64::STLXRB:
3793 case AArch64::STLXRH:
3794 case AArch64::STLXRW:
3795 case AArch64::STLXRX: {
3796 unsigned Rs = Inst.getOperand(0).getReg();
3797 unsigned Rt = Inst.getOperand(1).getReg();
3798 unsigned Rn = Inst.getOperand(2).getReg();
3799 if (RI->isSubRegisterEq(Rt, Rs) ||
3800 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
3801 return Error(Loc[0],
3802 "unpredictable STXR instruction, status is also a source");
3803 break;
3804 }
3805 case AArch64::STXPW:
3806 case AArch64::STXPX:
3807 case AArch64::STLXPW:
3808 case AArch64::STLXPX: {
3809 unsigned Rs = Inst.getOperand(0).getReg();
3810 unsigned Rt1 = Inst.getOperand(1).getReg();
3811 unsigned Rt2 = Inst.getOperand(2).getReg();
3812 unsigned Rn = Inst.getOperand(3).getReg();
3813 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
3814 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
3815 return Error(Loc[0],
3816 "unpredictable STXP instruction, status is also a source");
3817 break;
3818 }
3819 }
3820
3821
3822 // Now check immediate ranges. Separate from the above as there is overlap
3823 // in the instructions being checked and this keeps the nested conditionals
3824 // to a minimum.
3825 switch (Inst.getOpcode()) {
3826 case AArch64::ADDSWri:
3827 case AArch64::ADDSXri:
3828 case AArch64::ADDWri:
3829 case AArch64::ADDXri:
3830 case AArch64::SUBSWri:
3831 case AArch64::SUBSXri:
3832 case AArch64::SUBWri:
3833 case AArch64::SUBXri: {
3834 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3835 // some slight duplication here.
3836 if (Inst.getOperand(2).isExpr()) {
3837 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3838 AArch64MCExpr::VariantKind ELFRefKind;
3839 MCSymbolRefExpr::VariantKind DarwinRefKind;
3840 int64_t Addend;
3841 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3842
3843 // Only allow these with ADDXri.
3844 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3845 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3846 Inst.getOpcode() == AArch64::ADDXri)
3847 return false;
3848
3849 // Only allow these with ADDXri/ADDWri
3850 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3851 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3852 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3853 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3854 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3855 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3856 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3857 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
3858 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
3859 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
3860 (Inst.getOpcode() == AArch64::ADDXri ||
3861 Inst.getOpcode() == AArch64::ADDWri))
3862 return false;
3863
3864 // Don't allow symbol refs in the immediate field otherwise
3865 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3866 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3867 // 'cmp w0, 'borked')
3868 return Error(Loc.back(), "invalid immediate expression");
3869 }
3870 // We don't validate more complex expressions here
3871 }
3872 return false;
3873 }
3874 default:
3875 return false;
3876 }
3877}
3878
3879static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
3880 unsigned VariantID = 0);
3881
3882bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
3883 uint64_t ErrorInfo,
3884 OperandVector &Operands) {
3885 switch (ErrCode) {
3886 case Match_InvalidTiedOperand: {
3887 RegConstraintEqualityTy EqTy =
3888 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
3889 .getRegEqualityTy();
3890 switch (EqTy) {
3891 case RegConstraintEqualityTy::EqualsSubReg:
3892 return Error(Loc, "operand must be 64-bit form of destination register");
3893 case RegConstraintEqualityTy::EqualsSuperReg:
3894 return Error(Loc, "operand must be 32-bit form of destination register");
3895 case RegConstraintEqualityTy::EqualsReg:
3896 return Error(Loc, "operand must match destination register");
3897 }
3898 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3898)
;
3899 }
3900 case Match_MissingFeature:
3901 return Error(Loc,
3902 "instruction requires a CPU feature not currently enabled");
3903 case Match_InvalidOperand:
3904 return Error(Loc, "invalid operand for instruction");
3905 case Match_InvalidSuffix:
3906 return Error(Loc, "invalid type suffix for instruction");
3907 case Match_InvalidCondCode:
3908 return Error(Loc, "expected AArch64 condition code");
3909 case Match_AddSubRegExtendSmall:
3910 return Error(Loc,
3911 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3912 case Match_AddSubRegExtendLarge:
3913 return Error(Loc,
3914 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3915 case Match_AddSubSecondSource:
3916 return Error(Loc,
3917 "expected compatible register, symbol or integer in range [0, 4095]");
3918 case Match_LogicalSecondSource:
3919 return Error(Loc, "expected compatible register or logical immediate");
3920 case Match_InvalidMovImm32Shift:
3921 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3922 case Match_InvalidMovImm64Shift:
3923 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3924 case Match_AddSubRegShift32:
3925 return Error(Loc,
3926 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3927 case Match_AddSubRegShift64:
3928 return Error(Loc,
3929 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3930 case Match_InvalidFPImm:
3931 return Error(Loc,
3932 "expected compatible register or floating-point constant");
3933 case Match_InvalidMemoryIndexedSImm6:
3934 return Error(Loc, "index must be an integer in range [-32, 31].");
3935 case Match_InvalidMemoryIndexedSImm5:
3936 return Error(Loc, "index must be an integer in range [-16, 15].");
3937 case Match_InvalidMemoryIndexed1SImm4:
3938 return Error(Loc, "index must be an integer in range [-8, 7].");
3939 case Match_InvalidMemoryIndexed2SImm4:
3940 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
3941 case Match_InvalidMemoryIndexed3SImm4:
3942 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
3943 case Match_InvalidMemoryIndexed4SImm4:
3944 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
3945 case Match_InvalidMemoryIndexed16SImm4:
3946 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
3947 case Match_InvalidMemoryIndexed1SImm6:
3948 return Error(Loc, "index must be an integer in range [-32, 31].");
3949 case Match_InvalidMemoryIndexedSImm8:
3950 return Error(Loc, "index must be an integer in range [-128, 127].");
3951 case Match_InvalidMemoryIndexedSImm9:
3952 return Error(Loc, "index must be an integer in range [-256, 255].");
3953 case Match_InvalidMemoryIndexed8SImm10:
3954 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
3955 case Match_InvalidMemoryIndexed4SImm7:
3956 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3957 case Match_InvalidMemoryIndexed8SImm7:
3958 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3959 case Match_InvalidMemoryIndexed16SImm7:
3960 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3961 case Match_InvalidMemoryIndexed8UImm5:
3962 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
3963 case Match_InvalidMemoryIndexed4UImm5:
3964 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
3965 case Match_InvalidMemoryIndexed2UImm5:
3966 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
3967 case Match_InvalidMemoryIndexed8UImm6:
3968 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
3969 case Match_InvalidMemoryIndexed4UImm6:
3970 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
3971 case Match_InvalidMemoryIndexed2UImm6:
3972 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
3973 case Match_InvalidMemoryIndexed1UImm6:
3974 return Error(Loc, "index must be in range [0, 63].");
3975 case Match_InvalidMemoryWExtend8:
3976 return Error(Loc,
3977 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3978 case Match_InvalidMemoryWExtend16:
3979 return Error(Loc,
3980 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3981 case Match_InvalidMemoryWExtend32:
3982 return Error(Loc,
3983 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3984 case Match_InvalidMemoryWExtend64:
3985 return Error(Loc,
3986 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3987 case Match_InvalidMemoryWExtend128:
3988 return Error(Loc,
3989 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3990 case Match_InvalidMemoryXExtend8:
3991 return Error(Loc,
3992 "expected 'lsl' or 'sxtx' with optional shift of #0");
3993 case Match_InvalidMemoryXExtend16:
3994 return Error(Loc,
3995 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3996 case Match_InvalidMemoryXExtend32:
3997 return Error(Loc,
3998 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3999 case Match_InvalidMemoryXExtend64:
4000 return Error(Loc,
4001 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4002 case Match_InvalidMemoryXExtend128:
4003 return Error(Loc,
4004 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4005 case Match_InvalidMemoryIndexed1:
4006 return Error(Loc, "index must be an integer in range [0, 4095].");
4007 case Match_InvalidMemoryIndexed2:
4008 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4009 case Match_InvalidMemoryIndexed4:
4010 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4011 case Match_InvalidMemoryIndexed8:
4012 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4013 case Match_InvalidMemoryIndexed16:
4014 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4015 case Match_InvalidImm0_1:
4016 return Error(Loc, "immediate must be an integer in range [0, 1].");
4017 case Match_InvalidImm0_7:
4018 return Error(Loc, "immediate must be an integer in range [0, 7].");
4019 case Match_InvalidImm0_15:
4020 return Error(Loc, "immediate must be an integer in range [0, 15].");
4021 case Match_InvalidImm0_31:
4022 return Error(Loc, "immediate must be an integer in range [0, 31].");
4023 case Match_InvalidImm0_63:
4024 return Error(Loc, "immediate must be an integer in range [0, 63].");
4025 case Match_InvalidImm0_127:
4026 return Error(Loc, "immediate must be an integer in range [0, 127].");
4027 case Match_InvalidImm0_255:
4028 return Error(Loc, "immediate must be an integer in range [0, 255].");
4029 case Match_InvalidImm0_65535:
4030 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4031 case Match_InvalidImm1_8:
4032 return Error(Loc, "immediate must be an integer in range [1, 8].");
4033 case Match_InvalidImm1_16:
4034 return Error(Loc, "immediate must be an integer in range [1, 16].");
4035 case Match_InvalidImm1_32:
4036 return Error(Loc, "immediate must be an integer in range [1, 32].");
4037 case Match_InvalidImm1_64:
4038 return Error(Loc, "immediate must be an integer in range [1, 64].");
4039 case Match_InvalidSVEAddSubImm8:
4040 return Error(Loc, "immediate must be an integer in range [0, 255]"
4041 " with a shift amount of 0");
4042 case Match_InvalidSVEAddSubImm16:
4043 case Match_InvalidSVEAddSubImm32:
4044 case Match_InvalidSVEAddSubImm64:
4045 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4046 "multiple of 256 in range [256, 65280]");
4047 case Match_InvalidSVECpyImm8:
4048 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4049 " with a shift amount of 0");
4050 case Match_InvalidSVECpyImm16:
4051 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4052 "multiple of 256 in range [-32768, 65280]");
4053 case Match_InvalidSVECpyImm32:
4054 case Match_InvalidSVECpyImm64:
4055 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4056 "multiple of 256 in range [-32768, 32512]");
4057 case Match_InvalidIndexRange1_1:
4058 return Error(Loc, "expected lane specifier '[1]'");
4059 case Match_InvalidIndexRange0_15:
4060 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4061 case Match_InvalidIndexRange0_7:
4062 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4063 case Match_InvalidIndexRange0_3:
4064 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4065 case Match_InvalidIndexRange0_1:
4066 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4067 case Match_InvalidSVEIndexRange0_63:
4068 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4069 case Match_InvalidSVEIndexRange0_31:
4070 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4071 case Match_InvalidSVEIndexRange0_15:
4072 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4073 case Match_InvalidSVEIndexRange0_7:
4074 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4075 case Match_InvalidSVEIndexRange0_3:
4076 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4077 case Match_InvalidLabel:
4078 return Error(Loc, "expected label or encodable integer pc offset");
4079 case Match_MRS:
4080 return Error(Loc, "expected readable system register");
4081 case Match_MSR:
4082 return Error(Loc, "expected writable system register or pstate");
4083 case Match_InvalidComplexRotationEven:
4084 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4085 case Match_InvalidComplexRotationOdd:
4086 return Error(Loc, "complex rotation must be 90 or 270.");
4087 case Match_MnemonicFail: {
4088 std::string Suggestion = AArch64MnemonicSpellCheck(
4089 ((AArch64Operand &)*Operands[0]).getToken(),
4090 ComputeAvailableFeatures(STI->getFeatureBits()));
4091 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4092 }
4093 case Match_InvalidGPR64shifted8:
4094 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4095 case Match_InvalidGPR64shifted16:
4096 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4097 case Match_InvalidGPR64shifted32:
4098 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4099 case Match_InvalidGPR64shifted64:
4100 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4101 case Match_InvalidGPR64NoXZRshifted8:
4102 return Error(Loc, "register must be x0..x30 without shift");
4103 case Match_InvalidGPR64NoXZRshifted16:
4104 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4105 case Match_InvalidGPR64NoXZRshifted32:
4106 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4107 case Match_InvalidGPR64NoXZRshifted64:
4108 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4109 case Match_InvalidZPR32UXTW8:
4110 case Match_InvalidZPR32SXTW8:
4111 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4112 case Match_InvalidZPR32UXTW16:
4113 case Match_InvalidZPR32SXTW16:
4114 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4115 case Match_InvalidZPR32UXTW32:
4116 case Match_InvalidZPR32SXTW32:
4117 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4118 case Match_InvalidZPR32UXTW64:
4119 case Match_InvalidZPR32SXTW64:
4120 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4121 case Match_InvalidZPR64UXTW8:
4122 case Match_InvalidZPR64SXTW8:
4123 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4124 case Match_InvalidZPR64UXTW16:
4125 case Match_InvalidZPR64SXTW16:
4126 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4127 case Match_InvalidZPR64UXTW32:
4128 case Match_InvalidZPR64SXTW32:
4129 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4130 case Match_InvalidZPR64UXTW64:
4131 case Match_InvalidZPR64SXTW64:
4132 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4133 case Match_InvalidZPR32LSL8:
4134 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4135 case Match_InvalidZPR32LSL16:
4136 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4137 case Match_InvalidZPR32LSL32:
4138 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4139 case Match_InvalidZPR32LSL64:
4140 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4141 case Match_InvalidZPR64LSL8:
4142 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4143 case Match_InvalidZPR64LSL16:
4144 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4145 case Match_InvalidZPR64LSL32:
4146 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4147 case Match_InvalidZPR64LSL64:
4148 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4149 case Match_InvalidZPR0:
4150 return Error(Loc, "expected register without element width sufix");
4151 case Match_InvalidZPR8:
4152 case Match_InvalidZPR16:
4153 case Match_InvalidZPR32:
4154 case Match_InvalidZPR64:
4155 case Match_InvalidZPR128:
4156 return Error(Loc, "invalid element width");
4157 case Match_InvalidZPR_3b8:
4158 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4159 case Match_InvalidZPR_3b16:
4160 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4161 case Match_InvalidZPR_3b32:
4162 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4163 case Match_InvalidZPR_4b16:
4164 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4165 case Match_InvalidZPR_4b32:
4166 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4167 case Match_InvalidZPR_4b64:
4168 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4169 case Match_InvalidSVEPattern:
4170 return Error(Loc, "invalid predicate pattern");
4171 case Match_InvalidSVEPredicateAnyReg:
4172 case Match_InvalidSVEPredicateBReg:
4173 case Match_InvalidSVEPredicateHReg:
4174 case Match_InvalidSVEPredicateSReg:
4175 case Match_InvalidSVEPredicateDReg:
4176 return Error(Loc, "invalid predicate register.");
4177 case Match_InvalidSVEPredicate3bAnyReg:
4178 case Match_InvalidSVEPredicate3bBReg:
4179 case Match_InvalidSVEPredicate3bHReg:
4180 case Match_InvalidSVEPredicate3bSReg:
4181 case Match_InvalidSVEPredicate3bDReg:
4182 return Error(Loc, "restricted predicate has range [0, 7].");
4183 case Match_InvalidSVEExactFPImmOperandHalfOne:
4184 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4185 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4186 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4187 case Match_InvalidSVEExactFPImmOperandZeroOne:
4188 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4189 default:
4190 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4190)
;
4191 }
4192}
4193
4194static const char *getSubtargetFeatureName(uint64_t Val);
4195
4196bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4197 OperandVector &Operands,
4198 MCStreamer &Out,
4199 uint64_t &ErrorInfo,
4200 bool MatchingInlineAsm) {
4201 assert(!Operands.empty() && "Unexpect empty operand list!")(static_cast <bool> (!Operands.empty() && "Unexpect empty operand list!"
) ? void (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4201, __extension__ __PRETTY_FUNCTION__))
;
4202 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4203 assert(Op.isToken() && "Leading operand should always be a mnemonic!")(static_cast <bool> (Op.isToken() && "Leading operand should always be a mnemonic!"
) ? void (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4203, __extension__ __PRETTY_FUNCTION__))
;
4204
4205 StringRef Tok = Op.getToken();
4206 unsigned NumOperands = Operands.size();
4207
4208 if (NumOperands == 4 && Tok == "lsl") {
4209 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4210 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4211 if (Op2.isScalarReg() && Op3.isImm()) {
4212 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4213 if (Op3CE) {
4214 uint64_t Op3Val = Op3CE->getValue();
4215 uint64_t NewOp3Val = 0;
4216 uint64_t NewOp4Val = 0;
4217 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4218 Op2.getReg())) {
4219 NewOp3Val = (32 - Op3Val) & 0x1f;
4220 NewOp4Val = 31 - Op3Val;
4221 } else {
4222 NewOp3Val = (64 - Op3Val) & 0x3f;
4223 NewOp4Val = 63 - Op3Val;
4224 }
4225
4226 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4227 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4228
4229 Operands[0] = AArch64Operand::CreateToken(
4230 "ubfm", false, Op.getStartLoc(), getContext());
4231 Operands.push_back(AArch64Operand::CreateImm(
4232 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4233 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4234 Op3.getEndLoc(), getContext());
4235 }
4236 }
4237 } else if (NumOperands == 4 && Tok == "bfc") {
4238 // FIXME: Horrible hack to handle BFC->BFM alias.
4239 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4240 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4241 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4242
4243 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4244 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4245 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4246
4247 if (LSBCE && WidthCE) {
4248 uint64_t LSB = LSBCE->getValue();
4249 uint64_t Width = WidthCE->getValue();
4250
4251 uint64_t RegWidth = 0;
4252 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4253 Op1.getReg()))
4254 RegWidth = 64;
4255 else
4256 RegWidth = 32;
4257
4258 if (LSB >= RegWidth)
4259 return Error(LSBOp.getStartLoc(),
4260 "expected integer in range [0, 31]");
4261 if (Width < 1 || Width > RegWidth)
4262 return Error(WidthOp.getStartLoc(),
4263 "expected integer in range [1, 32]");
4264
4265 uint64_t ImmR = 0;
4266 if (RegWidth == 32)
4267 ImmR = (32 - LSB) & 0x1f;
4268 else
4269 ImmR = (64 - LSB) & 0x3f;
4270
4271 uint64_t ImmS = Width - 1;
4272
4273 if (ImmR != 0 && ImmS >= ImmR)
4274 return Error(WidthOp.getStartLoc(),
4275 "requested insert overflows register");
4276
4277 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4278 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4279 Operands[0] = AArch64Operand::CreateToken(
4280 "bfm", false, Op.getStartLoc(), getContext());
4281 Operands[2] = AArch64Operand::CreateReg(
4282 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4283 SMLoc(), SMLoc(), getContext());
4284 Operands[3] = AArch64Operand::CreateImm(
4285 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4286 Operands.emplace_back(
4287 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4288 WidthOp.getEndLoc(), getContext()));
4289 }
4290 }
4291 } else if (NumOperands == 5) {
4292 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4293 // UBFIZ -> UBFM aliases.
4294 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4295 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4296 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4297 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4298
4299 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4300 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4301 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4302
4303 if (Op3CE && Op4CE) {
4304 uint64_t Op3Val = Op3CE->getValue();
4305 uint64_t Op4Val = Op4CE->getValue();
4306
4307 uint64_t RegWidth = 0;
4308 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4309 Op1.getReg()))
4310 RegWidth = 64;
4311 else
4312 RegWidth = 32;
4313
4314 if (Op3Val >= RegWidth)
4315 return Error(Op3.getStartLoc(),
4316 "expected integer in range [0, 31]");
4317 if (Op4Val < 1 || Op4Val > RegWidth)
4318 return Error(Op4.getStartLoc(),
4319 "expected integer in range [1, 32]");
4320
4321 uint64_t NewOp3Val = 0;
4322 if (RegWidth == 32)
4323 NewOp3Val = (32 - Op3Val) & 0x1f;
4324 else
4325 NewOp3Val = (64 - Op3Val) & 0x3f;
4326
4327 uint64_t NewOp4Val = Op4Val - 1;
4328
4329 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4330 return Error(Op4.getStartLoc(),
4331 "requested insert overflows register");
4332
4333 const MCExpr *NewOp3 =
4334 MCConstantExpr::create(NewOp3Val, getContext());
4335 const MCExpr *NewOp4 =
4336 MCConstantExpr::create(NewOp4Val, getContext());
4337 Operands[3] = AArch64Operand::CreateImm(
4338 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4339 Operands[4] = AArch64Operand::CreateImm(
4340 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4341 if (Tok == "bfi")
4342 Operands[0] = AArch64Operand::CreateToken(
4343 "bfm", false, Op.getStartLoc(), getContext());
4344 else if (Tok == "sbfiz")
4345 Operands[0] = AArch64Operand::CreateToken(
4346 "sbfm", false, Op.getStartLoc(), getContext());
4347 else if (Tok == "ubfiz")
4348 Operands[0] = AArch64Operand::CreateToken(
4349 "ubfm", false, Op.getStartLoc(), getContext());
4350 else
4351 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4351)
;
4352 }
4353 }
4354
4355 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4356 // UBFX -> UBFM aliases.
4357 } else if (NumOperands == 5 &&
4358 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4359 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4360 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4361 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4362
4363 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4364 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4365 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4366
4367 if (Op3CE && Op4CE) {
4368 uint64_t Op3Val = Op3CE->getValue();
4369 uint64_t Op4Val = Op4CE->getValue();
4370
4371 uint64_t RegWidth = 0;
4372 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4373 Op1.getReg()))
4374 RegWidth = 64;
4375 else
4376 RegWidth = 32;
4377
4378 if (Op3Val >= RegWidth)
4379 return Error(Op3.getStartLoc(),
4380 "expected integer in range [0, 31]");
4381 if (Op4Val < 1 || Op4Val > RegWidth)
4382 return Error(Op4.getStartLoc(),
4383 "expected integer in range [1, 32]");
4384
4385 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4386
4387 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4388 return Error(Op4.getStartLoc(),
4389 "requested extract overflows register");
4390
4391 const MCExpr *NewOp4 =
4392 MCConstantExpr::create(NewOp4Val, getContext());
4393 Operands[4] = AArch64Operand::CreateImm(
4394 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4395 if (Tok == "bfxil")
4396 Operands[0] = AArch64Operand::CreateToken(
4397 "bfm", false, Op.getStartLoc(), getContext());
4398 else if (Tok == "sbfx")
4399 Operands[0] = AArch64Operand::CreateToken(
4400 "sbfm", false, Op.getStartLoc(), getContext());
4401 else if (Tok == "ubfx")
4402 Operands[0] = AArch64Operand::CreateToken(
4403 "ubfm", false, Op.getStartLoc(), getContext());
4404 else
4405 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4405)
;
4406 }
4407 }
4408 }
4409 }
4410
4411 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4412 // instruction for FP registers correctly in some rare circumstances. Convert
4413 // it to a safe instruction and warn (because silently changing someone's
4414 // assembly is rude).
4415 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4416 NumOperands == 4 && Tok == "movi") {
4417 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4418 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4419 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4420 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4421 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4422 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4423 if (Suffix.lower() == ".2d" &&
4424 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4425 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4426 " correctly on this CPU, converting to equivalent movi.16b");
4427 // Switch the suffix to .16b.
4428 unsigned Idx = Op1.isToken() ? 1 : 2;
4429 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4430 getContext());
4431 }
4432 }
4433 }
4434
4435 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4436 // InstAlias can't quite handle this since the reg classes aren't
4437 // subclasses.
4438 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4439 // The source register can be Wn here, but the matcher expects a
4440 // GPR64. Twiddle it here if necessary.
4441 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4442 if (Op.isScalarReg()) {
4443 unsigned Reg = getXRegFromWReg(Op.getReg());
4444 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4445 Op.getStartLoc(), Op.getEndLoc(),
4446 getContext());
4447 }
4448 }
4449 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4450 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4451 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4452 if (Op.isScalarReg() &&
4453 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4454 Op.getReg())) {
4455 // The source register can be Wn here, but the matcher expects a
4456 // GPR64. Twiddle it here if necessary.
4457 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4458 if (Op.isScalarReg()) {
4459 unsigned Reg = getXRegFromWReg(Op.getReg());
4460 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4461 Op.getStartLoc(),
4462 Op.getEndLoc(), getContext());
4463 }
4464 }
4465 }
4466 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4467 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4468 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4469 if (Op.isScalarReg() &&
4470 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4471 Op.getReg())) {
4472 // The source register can be Wn here, but the matcher expects a
4473 // GPR32. Twiddle it here if necessary.
4474 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4475 if (Op.isScalarReg()) {
4476 unsigned Reg = getWRegFromXReg(Op.getReg());
4477 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4478 Op.getStartLoc(),
4479 Op.getEndLoc(), getContext());
4480 }
4481 }
4482 }
4483
4484 MCInst Inst;
4485 // First try to match against the secondary set of tables containing the
4486 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4487 unsigned MatchResult =
4488 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4489
4490 // If that fails, try against the alternate table containing long-form NEON:
4491 // "fadd v0.2s, v1.2s, v2.2s"
4492 if (MatchResult != Match_Success) {
4493 // But first, save the short-form match result: we can use it in case the
4494 // long-form match also fails.
4495 auto ShortFormNEONErrorInfo = ErrorInfo;
4496 auto ShortFormNEONMatchResult = MatchResult;
4497
4498 MatchResult =
4499 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4500
4501 // Now, both matches failed, and the long-form match failed on the mnemonic
4502 // suffix token operand. The short-form match failure is probably more
4503 // relevant: use it instead.
4504 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4505 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4506 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4507 MatchResult = ShortFormNEONMatchResult;
4508 ErrorInfo = ShortFormNEONErrorInfo;
4509 }
4510 }
4511
4512 switch (MatchResult) {
4513 case Match_Success: {
4514 // Perform range checking and other semantic validations
4515 SmallVector<SMLoc, 8> OperandLocs;
4516 NumOperands = Operands.size();
4517 for (unsigned i = 1; i < NumOperands; ++i)
4518 OperandLocs.push_back(Operands[i]->getStartLoc());
4519 if (validateInstruction(Inst, OperandLocs))
4520 return true;
4521
4522 Inst.setLoc(IDLoc);
4523 Out.EmitInstruction(Inst, getSTI());
4524 return false;
4525 }
4526 case Match_MissingFeature: {
4527 assert(ErrorInfo && "Unknown missing feature!")(static_cast <bool> (ErrorInfo && "Unknown missing feature!"
) ? void (0) : __assert_fail ("ErrorInfo && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4527, __extension__ __PRETTY_FUNCTION__))
;
4528 // Special case the error message for the very common case where only
4529 // a single subtarget feature is missing (neon, e.g.).
4530 std::string Msg = "instruction requires:";
4531 uint64_t Mask = 1;
4532 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4533 if (ErrorInfo & Mask) {
4534 Msg += " ";
4535 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4536 }
4537 Mask <<= 1;
4538 }
4539 return Error(IDLoc, Msg);
4540 }
4541 case Match_MnemonicFail:
4542 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4543 case Match_InvalidOperand: {
4544 SMLoc ErrorLoc = IDLoc;
4545
4546 if (ErrorInfo != ~0ULL) {
4547 if (ErrorInfo >= Operands.size())
4548 return Error(IDLoc, "too few operands for instruction",
4549 SMRange(IDLoc, getTok().getLoc()));
4550
4551 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4552 if (ErrorLoc == SMLoc())
4553 ErrorLoc = IDLoc;
4554 }
4555 // If the match failed on a suffix token operand, tweak the diagnostic
4556 // accordingly.
4557 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4558 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4559 MatchResult = Match_InvalidSuffix;
4560
4561 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4562 }
4563 case Match_InvalidTiedOperand:
4564 case Match_InvalidMemoryIndexed1:
4565 case Match_InvalidMemoryIndexed2:
4566 case Match_InvalidMemoryIndexed4:
4567 case Match_InvalidMemoryIndexed8:
4568 case Match_InvalidMemoryIndexed16:
4569 case Match_InvalidCondCode:
4570 case Match_AddSubRegExtendSmall:
4571 case Match_AddSubRegExtendLarge:
4572 case Match_AddSubSecondSource:
4573 case Match_LogicalSecondSource:
4574 case Match_AddSubRegShift32:
4575 case Match_AddSubRegShift64:
4576 case Match_InvalidMovImm32Shift:
4577 case Match_InvalidMovImm64Shift:
4578 case Match_InvalidFPImm:
4579 case Match_InvalidMemoryWExtend8:
4580 case Match_InvalidMemoryWExtend16:
4581 case Match_InvalidMemoryWExtend32:
4582 case Match_InvalidMemoryWExtend64:
4583 case Match_InvalidMemoryWExtend128:
4584 case Match_InvalidMemoryXExtend8:
4585 case Match_InvalidMemoryXExtend16:
4586 case Match_InvalidMemoryXExtend32:
4587 case Match_InvalidMemoryXExtend64:
4588 case Match_InvalidMemoryXExtend128:
4589 case Match_InvalidMemoryIndexed1SImm4:
4590 case Match_InvalidMemoryIndexed2SImm4:
4591 case Match_InvalidMemoryIndexed3SImm4:
4592 case Match_InvalidMemoryIndexed4SImm4:
4593 case Match_InvalidMemoryIndexed1SImm6:
4594 case Match_InvalidMemoryIndexed16SImm4:
4595 case Match_InvalidMemoryIndexed4SImm7:
4596 case Match_InvalidMemoryIndexed8SImm7:
4597 case Match_InvalidMemoryIndexed16SImm7:
4598 case Match_InvalidMemoryIndexed8UImm5:
4599 case Match_InvalidMemoryIndexed4UImm5:
4600 case Match_InvalidMemoryIndexed2UImm5:
4601 case Match_InvalidMemoryIndexed1UImm6:
4602 case Match_InvalidMemoryIndexed2UImm6:
4603 case Match_InvalidMemoryIndexed4UImm6:
4604 case Match_InvalidMemoryIndexed8UImm6:
4605 case Match_InvalidMemoryIndexedSImm6:
4606 case Match_InvalidMemoryIndexedSImm5:
4607 case Match_InvalidMemoryIndexedSImm8:
4608 case Match_InvalidMemoryIndexedSImm9:
4609 case Match_InvalidMemoryIndexed8SImm10:
4610 case Match_InvalidImm0_1:
4611 case Match_InvalidImm0_7:
4612 case Match_InvalidImm0_15:
4613 case Match_InvalidImm0_31:
4614 case Match_InvalidImm0_63:
4615 case Match_InvalidImm0_127:
4616 case Match_InvalidImm0_255:
4617 case Match_InvalidImm0_65535:
4618 case Match_InvalidImm1_8:
4619 case Match_InvalidImm1_16:
4620 case Match_InvalidImm1_32:
4621 case Match_InvalidImm1_64:
4622 case Match_InvalidSVEAddSubImm8:
4623 case Match_InvalidSVEAddSubImm16:
4624 case Match_InvalidSVEAddSubImm32:
4625 case Match_InvalidSVEAddSubImm64:
4626 case Match_InvalidSVECpyImm8:
4627 case Match_InvalidSVECpyImm16:
4628 case Match_InvalidSVECpyImm32:
4629 case Match_InvalidSVECpyImm64:
4630 case Match_InvalidIndexRange1_1:
4631 case Match_InvalidIndexRange0_15:
4632 case Match_InvalidIndexRange0_7:
4633 case Match_InvalidIndexRange0_3:
4634 case Match_InvalidIndexRange0_1:
4635 case Match_InvalidSVEIndexRange0_63:
4636 case Match_InvalidSVEIndexRange0_31:
4637 case Match_InvalidSVEIndexRange0_15:
4638 case Match_InvalidSVEIndexRange0_7:
4639 case Match_InvalidSVEIndexRange0_3:
4640 case Match_InvalidLabel:
4641 case Match_InvalidComplexRotationEven:
4642 case Match_InvalidComplexRotationOdd:
4643 case Match_InvalidGPR64shifted8:
4644 case Match_InvalidGPR64shifted16:
4645 case Match_InvalidGPR64shifted32:
4646 case Match_InvalidGPR64shifted64:
4647 case Match_InvalidGPR64NoXZRshifted8:
4648 case Match_InvalidGPR64NoXZRshifted16:
4649 case Match_InvalidGPR64NoXZRshifted32:
4650 case Match_InvalidGPR64NoXZRshifted64:
4651 case Match_InvalidZPR32UXTW8:
4652 case Match_InvalidZPR32UXTW16:
4653 case Match_InvalidZPR32UXTW32:
4654 case Match_InvalidZPR32UXTW64:
4655 case Match_InvalidZPR32SXTW8:
4656 case Match_InvalidZPR32SXTW16:
4657 case Match_InvalidZPR32SXTW32:
4658 case Match_InvalidZPR32SXTW64:
4659 case Match_InvalidZPR64UXTW8:
4660 case Match_InvalidZPR64SXTW8:
4661 case Match_InvalidZPR64UXTW16:
4662 case Match_InvalidZPR64SXTW16:
4663 case Match_InvalidZPR64UXTW32:
4664 case Match_InvalidZPR64SXTW32:
4665 case Match_InvalidZPR64UXTW64:
4666 case Match_InvalidZPR64SXTW64:
4667 case Match_InvalidZPR32LSL8:
4668 case Match_InvalidZPR32LSL16:
4669 case Match_InvalidZPR32LSL32:
4670 case Match_InvalidZPR32LSL64:
4671 case Match_InvalidZPR64LSL8:
4672 case Match_InvalidZPR64LSL16:
4673 case Match_InvalidZPR64LSL32:
4674 case Match_InvalidZPR64LSL64:
4675 case Match_InvalidZPR0:
4676 case Match_InvalidZPR8:
4677 case Match_InvalidZPR16:
4678 case Match_InvalidZPR32:
4679 case Match_InvalidZPR64:
4680 case Match_InvalidZPR128:
4681 case Match_InvalidZPR_3b8:
4682 case Match_InvalidZPR_3b16:
4683 case Match_InvalidZPR_3b32:
4684 case Match_InvalidZPR_4b16:
4685 case Match_InvalidZPR_4b32:
4686 case Match_InvalidZPR_4b64:
4687 case Match_InvalidSVEPredicateAnyReg:
4688 case Match_InvalidSVEPattern:
4689 case Match_InvalidSVEPredicateBReg:
4690 case Match_InvalidSVEPredicateHReg:
4691 case Match_InvalidSVEPredicateSReg:
4692 case Match_InvalidSVEPredicateDReg:
4693 case Match_InvalidSVEPredicate3bAnyReg:
4694 case Match_InvalidSVEPredicate3bBReg:
4695 case Match_InvalidSVEPredicate3bHReg:
4696 case Match_InvalidSVEPredicate3bSReg:
4697 case Match_InvalidSVEPredicate3bDReg:
4698 case Match_InvalidSVEExactFPImmOperandHalfOne:
4699 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4700 case Match_InvalidSVEExactFPImmOperandZeroOne:
4701 case Match_MSR:
4702 case Match_MRS: {
4703 if (ErrorInfo >= Operands.size())
4704 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4705 // Any time we get here, there's nothing fancy to do. Just get the
4706 // operand SMLoc and display the diagnostic.
4707 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4708 if (ErrorLoc == SMLoc())
4709 ErrorLoc = IDLoc;
4710 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4711 }
4712 }
4713
4714 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4714)
;
4715}
4716
4717/// ParseDirective parses the arm specific directives
4718bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4719 const MCObjectFileInfo::Environment Format =
4720 getContext().getObjectFileInfo()->getObjectFileType();
4721 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4722 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4723
4724 StringRef IDVal = DirectiveID.getIdentifier();
4725 SMLoc Loc = DirectiveID.getLoc();
4726 if (IDVal == ".arch")
4727 parseDirectiveArch(Loc);
4728 else if (IDVal == ".cpu")
4729 parseDirectiveCPU(Loc);
4730 else if (IDVal == ".tlsdesccall")
4731 parseDirectiveTLSDescCall(Loc);
4732 else if (IDVal == ".ltorg" || IDVal == ".pool")
4733 parseDirectiveLtorg(Loc);
4734 else if (IDVal == ".unreq")
4735 parseDirectiveUnreq(Loc);
4736 else if (!IsMachO && !IsCOFF) {
4737 if (IDVal == ".inst")
4738 parseDirectiveInst(Loc);
4739 else
4740 return true;
4741 } else if (IDVal == MCLOHDirectiveName())
4742 parseDirectiveLOH(IDVal, Loc);
4743 else
4744 return true;
4745 return false;
4746}
4747
4748static const struct {
4749 const char *Name;
4750 const FeatureBitset Features;
4751} ExtensionMap[] = {
4752 { "crc", {AArch64::FeatureCRC} },
4753 { "sm4", {AArch64::FeatureSM4} },
4754 { "sha3", {AArch64::FeatureSHA3} },
4755 { "sha2", {AArch64::FeatureSHA2} },
4756 { "aes", {AArch64::FeatureAES} },
4757 { "crypto", {AArch64::FeatureCrypto} },
4758 { "fp", {AArch64::FeatureFPARMv8} },
4759 { "simd", {AArch64::FeatureNEON} },
4760 { "ras", {AArch64::FeatureRAS} },
4761 { "lse", {AArch64::FeatureLSE} },
4762
4763 // FIXME: Unsupported extensions
4764 { "pan", {} },
4765 { "lor", {} },
4766 { "rdma", {} },
4767 { "profile", {} },
4768};
4769
4770static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
4771 SmallVector<StringRef, 4> &RequestedExtensions) {
4772 const bool NoCrypto =
4773 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
4774 "nocrypto") != std::end(RequestedExtensions));
4775 const bool Crypto =
4776 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
4777 "crypto") != std::end(RequestedExtensions));
4778
4779 if (!NoCrypto && Crypto) {
4780 switch (ArchKind) {
4781 default:
4782 // Map 'generic' (and others) to sha2 and aes, because
4783 // that was the traditional meaning of crypto.
4784 case AArch64::ArchKind::ARMV8_1A:
4785 case AArch64::ArchKind::ARMV8_2A:
4786 case AArch64::ArchKind::ARMV8_3A:
4787 RequestedExtensions.push_back("sha2");
4788 RequestedExtensions.push_back("aes");
4789 break;
4790 case AArch64::ArchKind::ARMV8_4A:
4791 RequestedExtensions.push_back("sm4");
4792 RequestedExtensions.push_back("sha3");
4793 RequestedExtensions.push_back("sha2");
4794 RequestedExtensions.push_back("aes");
4795 break;
4796 }
4797 } else if (NoCrypto) {
4798 switch (ArchKind) {
4799 default:
4800 // Map 'generic' (and others) to sha2 and aes, because
4801 // that was the traditional meaning of crypto.
4802 case AArch64::ArchKind::ARMV8_1A:
4803 case AArch64::ArchKind::ARMV8_2A:
4804 case AArch64::ArchKind::ARMV8_3A:
4805 RequestedExtensions.push_back("nosha2");
4806 RequestedExtensions.push_back("noaes");
4807 break;
4808 case AArch64::ArchKind::ARMV8_4A:
4809 RequestedExtensions.push_back("nosm4");
4810 RequestedExtensions.push_back("nosha3");
4811 RequestedExtensions.push_back("nosha2");
4812 RequestedExtensions.push_back("noaes");
4813 break;
4814 }
4815 }
4816}
4817
4818/// parseDirectiveArch
4819/// ::= .arch token
4820bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4821 SMLoc ArchLoc = getLoc();
4822
4823 StringRef Arch, ExtensionString;
4824 std::tie(Arch, ExtensionString) =
4825 getParser().parseStringToEndOfStatement().trim().split('+');
4826
4827 AArch64::ArchKind ID = AArch64::parseArch(Arch);
4828 if (ID == AArch64::ArchKind::INVALID)
4829 return Error(ArchLoc, "unknown arch name");
4830
4831 if (parseToken(AsmToken::EndOfStatement))
4832 return true;
4833
4834 // Get the architecture and extension features.
4835 std::vector<StringRef> AArch64Features;
4836 AArch64::getArchFeatures(ID, AArch64Features);
4837 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
4838 AArch64Features);
4839
4840 MCSubtargetInfo &STI = copySTI();
4841 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4842 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4843
4844 SmallVector<StringRef, 4> RequestedExtensions;
4845 if (!ExtensionString.empty())
4846 ExtensionString.split(RequestedExtensions, '+');
4847
4848 ExpandCryptoAEK(ID, RequestedExtensions);
4849
4850 FeatureBitset Features = STI.getFeatureBits();
4851 for (auto Name : RequestedExtensions) {
4852 bool EnableFeature = true;
4853
4854 if (Name.startswith_lower("no")) {
4855 EnableFeature = false;
4856 Name = Name.substr(2);
4857 }
4858
4859 for (const auto &Extension : ExtensionMap) {
4860 if (Extension.Name != Name)
4861 continue;
4862
4863 if (Extension.Features.none())
4864 report_fatal_error("unsupported architectural extension: " + Name);
4865
4866 FeatureBitset ToggleFeatures = EnableFeature
4867 ? (~Features & Extension.Features)
4868 : ( Features & Extension.Features);
4869 uint64_t Features =
4870 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4871 setAvailableFeatures(Features);
4872 break;
4873 }
4874 }
4875 return false;
4876}
4877
4878static SMLoc incrementLoc(SMLoc L, int Offset) {
4879 return SMLoc::getFromPointer(L.getPointer() + Offset);
4880}
4881
4882/// parseDirectiveCPU
4883/// ::= .cpu id
4884bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4885 SMLoc CurLoc = getLoc();
4886
4887 StringRef CPU, ExtensionString;
4888 std::tie(CPU, ExtensionString) =
4889 getParser().parseStringToEndOfStatement().trim().split('+');
4890
4891 if (parseToken(AsmToken::EndOfStatement))
4892 return true;
4893
4894 SmallVector<StringRef, 4> RequestedExtensions;
4895 if (!ExtensionString.empty())
4896 ExtensionString.split(RequestedExtensions, '+');
4897
4898 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4899 // once that is tablegen'ed
4900 if (!getSTI().isCPUStringValid(CPU)) {
4901 Error(CurLoc, "unknown CPU name");
4902 return false;
4903 }
4904
4905 MCSubtargetInfo &STI = copySTI();
4906 STI.setDefaultFeatures(CPU, "");
4907 CurLoc = incrementLoc(CurLoc, CPU.size());
4908
4909 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
4910
4911 FeatureBitset Features = STI.getFeatureBits();
4912 for (auto Name : RequestedExtensions) {
4913 // Advance source location past '+'.
4914 CurLoc = incrementLoc(CurLoc, 1);
4915
4916 bool EnableFeature = true;
4917
4918 if (Name.startswith_lower("no")) {
4919 EnableFeature = false;
4920 Name = Name.substr(2);
4921 }
4922
4923 bool FoundExtension = false;
4924 for (const auto &Extension : ExtensionMap) {
4925 if (Extension.Name != Name)
4926 continue;
4927
4928 if (Extension.Features.none())
4929 report_fatal_error("unsupported architectural extension: " + Name);
4930
4931 FeatureBitset ToggleFeatures = EnableFeature
4932 ? (~Features & Extension.Features)
4933 : ( Features & Extension.Features);
4934 uint64_t Features =
4935 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4936 setAvailableFeatures(Features);
4937 FoundExtension = true;
4938
4939 break;
4940 }
4941
4942 if (!FoundExtension)
4943 Error(CurLoc, "unsupported architectural extension");
4944
4945 CurLoc = incrementLoc(CurLoc, Name.size());
4946 }
4947 return false;
4948}
4949
4950/// parseDirectiveInst
4951/// ::= .inst opcode [, ...]
4952bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4953 if (getLexer().is(AsmToken::EndOfStatement))
4954 return Error(Loc, "expected expression following '.inst' directive");
4955
4956 auto parseOp = [&]() -> bool {
4957 SMLoc L = getLoc();
4958 const MCExpr *Expr;
4959 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4960 return true;
4961 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4962 if (check(!Value, L, "expected constant expression"))
4963 return true;
4964 getTargetStreamer().emitInst(Value->getValue());
4965 return false;
4966 };
4967
4968 if (parseMany(parseOp))
4969 return addErrorSuffix(" in '.inst' directive");
4970 return false;
4971}
4972
4973// parseDirectiveTLSDescCall:
4974// ::= .tlsdesccall symbol
4975bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4976 StringRef Name;
4977 if (check(getParser().parseIdentifier(Name), L,
4978 "expected symbol after directive") ||
4979 parseToken(AsmToken::EndOfStatement))
4980 return true;
4981
4982 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4983 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4984 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4985
4986 MCInst Inst;
4987 Inst.setOpcode(AArch64::TLSDESCCALL);
4988 Inst.addOperand(MCOperand::createExpr(Expr));
4989
4990 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4991 return false;
4992}
4993
4994/// ::= .loh <lohName | lohId> label1, ..., labelN
4995/// The number of arguments depends on the loh identifier.
4996bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4997 MCLOHType Kind;
4998 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4999 if (getParser().getTok().isNot(AsmToken::Integer))
5000 return TokError("expected an identifier or a number in directive");
5001 // We successfully get a numeric value for the identifier.
5002 // Check if it is valid.
5003 int64_t Id = getParser().getTok().getIntVal();
5004 if (Id <= -1U && !isValidMCLOHType(Id))
5005 return TokError("invalid numeric identifier in directive");
5006 Kind = (MCLOHType)Id;
5007 } else {
5008 StringRef Name = getTok().getIdentifier();
5009 // We successfully parse an identifier.
5010 // Check if it is a recognized one.
5011 int Id = MCLOHNameToId(Name);
5012
5013 if (Id == -1)
5014 return TokError("invalid identifier in directive");
5015 Kind = (MCLOHType)Id;
5016 }
5017 // Consume the identifier.
5018 Lex();
5019 // Get the number of arguments of this LOH.
5020 int NbArgs = MCLOHIdToNbArgs(Kind);
5021
5022 assert(NbArgs != -1 && "Invalid number of arguments")(static_cast <bool> (NbArgs != -1 && "Invalid number of arguments"
) ? void (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5022, __extension__ __PRETTY_FUNCTION__))
;
5023
5024 SmallVector<MCSymbol *, 3> Args;
5025 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5026 StringRef Name;
5027 if (getParser().parseIdentifier(Name))
5028 return TokError("expected identifier in directive");
5029 Args.push_back(getContext().getOrCreateSymbol(Name));
5030
5031 if (Idx + 1 == NbArgs)
5032 break;
5033 if (parseToken(AsmToken::Comma,
5034 "unexpected token in '" + Twine(IDVal) + "' directive"))
5035 return true;
5036 }
5037 if (parseToken(AsmToken::EndOfStatement,
5038 "unexpected token in '" + Twine(IDVal) + "' directive"))
5039 return true;
5040
5041 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5042 return false;
5043}
5044
5045/// parseDirectiveLtorg
5046/// ::= .ltorg | .pool
5047bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5048 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5049 return true;
5050 getTargetStreamer().emitCurrentConstantPool();
5051 return false;
5052}
5053
5054/// parseDirectiveReq
5055/// ::= name .req registername
5056bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5057 MCAsmParser &Parser = getParser();
5058 Parser.Lex(); // Eat the '.req' token.
5059 SMLoc SRegLoc = getLoc();
5060 RegKind RegisterKind = RegKind::Scalar;
5061 unsigned RegNum;
5062 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5063
5064 if (ParseRes != MatchOperand_Success) {
5065 StringRef Kind;
5066 RegisterKind = RegKind::NeonVector;
5067 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5068
5069 if (ParseRes == MatchOperand_ParseFail)
5070 return true;
5071
5072 if (ParseRes == MatchOperand_Success && !Kind.empty())
5073 return Error(SRegLoc, "vector register without type specifier expected");
5074 }
5075
5076 if (ParseRes != MatchOperand_Success) {
5077 StringRef Kind;
5078 RegisterKind = RegKind::SVEDataVector;
5079 ParseRes =
5080 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5081
5082 if (ParseRes == MatchOperand_ParseFail)
5083 return true;
5084
5085 if (ParseRes == MatchOperand_Success && !Kind.empty())
5086 return Error(SRegLoc,
5087 "sve vector register without type specifier expected");
5088 }
5089
5090 if (ParseRes != MatchOperand_Success) {
5091 StringRef Kind;
5092 RegisterKind = RegKind::SVEPredicateVector;
5093 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5094
5095 if (ParseRes == MatchOperand_ParseFail)
5096 return true;
5097
5098 if (ParseRes == MatchOperand_Success && !Kind.empty())
5099 return Error(SRegLoc,
5100 "sve predicate register without type specifier expected");
5101 }
5102
5103 if (ParseRes != MatchOperand_Success)
5104 return Error(SRegLoc, "register name or alias expected");
5105
5106 // Shouldn't be anything else.
5107 if (parseToken(AsmToken::EndOfStatement,
5108 "unexpected input in .req directive"))
5109 return true;
5110
5111 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5112 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5113 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5114
5115 return false;
5116}
5117
5118/// parseDirectiveUneq
5119/// ::= .unreq registername
5120bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5121 MCAsmParser &Parser = getParser();
5122 if (getTok().isNot(AsmToken::Identifier))
5123 return TokError("unexpected input in .unreq directive.");
5124 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5125 Parser.Lex(); // Eat the identifier.
5126 if (parseToken(AsmToken::EndOfStatement))
5127 return addErrorSuffix("in '.unreq' directive");
5128 return false;
5129}
5130
5131bool
5132AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5133 AArch64MCExpr::VariantKind &ELFRefKind,
5134 MCSymbolRefExpr::VariantKind &DarwinRefKind,
5135 int64_t &Addend) {
5136 ELFRefKind = AArch64MCExpr::VK_INVALID;
5137 DarwinRefKind = MCSymbolRefExpr::VK_None;
5138 Addend = 0;
5139
5140 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5141 ELFRefKind = AE->getKind();
5142 Expr = AE->getSubExpr();
5143 }
5144
5145 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5146 if (SE) {
5147 // It's a simple symbol reference with no addend.
5148 DarwinRefKind = SE->getKind();
5149 return true;
5150 }
5151
5152 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
5153 if (!BE)
5154 return false;
5155
5156 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
5157 if (!SE)
5158 return false;
5159 DarwinRefKind = SE->getKind();
5160
5161 if (BE->getOpcode() != MCBinaryExpr::Add &&
5162 BE->getOpcode() != MCBinaryExpr::Sub)
5163 return false;
5164
5165 // See if the addend is a constant, otherwise there's more going
5166 // on here than we can deal with.
5167 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
5168 if (!AddendExpr)
5169 return false;
5170
5171 Addend = AddendExpr->getValue();
5172 if (BE->getOpcode() == MCBinaryExpr::Sub)
5173 Addend = -Addend;
5174
5175 // It's some symbol reference + a constant addend, but really
5176 // shouldn't use both Darwin and ELF syntax.
5177 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5178 DarwinRefKind == MCSymbolRefExpr::VK_None;
5179}
5180
5181/// Force static initialization.
5182extern "C" void LLVMInitializeAArch64AsmParser() {
5183 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5184 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5185 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5186}
5187
5188#define GET_REGISTER_MATCHER
5189#define GET_SUBTARGET_FEATURE_NAME
5190#define GET_MATCHER_IMPLEMENTATION
5191#define GET_MNEMONIC_SPELL_CHECKER
5192#include "AArch64GenAsmMatcher.inc"
5193
5194// Define this matcher function after the auto-generated include so we
5195// have the match class enum definitions.
5196unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5197 unsigned Kind) {
5198 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5199 // If the kind is a token for a literal immediate, check if our asm
5200 // operand matches. This is for InstAliases which have a fixed-value
5201 // immediate in the syntax.
5202 int64_t ExpectedVal;
5203 switch (Kind) {
5204 default:
5205 return Match_InvalidOperand;
5206 case MCK__35_0:
5207 ExpectedVal = 0;
5208 break;
5209 case MCK__35_1:
5210 ExpectedVal = 1;
5211 break;
5212 case MCK__35_12:
5213 ExpectedVal = 12;
5214 break;
5215 case MCK__35_16:
5216 ExpectedVal = 16;
5217 break;
5218 case MCK__35_2:
5219 ExpectedVal = 2;
5220 break;
5221 case MCK__35_24:
5222 ExpectedVal = 24;
5223 break;
5224 case MCK__35_3:
5225 ExpectedVal = 3;
5226 break;
5227 case MCK__35_32:
5228 ExpectedVal = 32;
5229 break;
5230 case MCK__35_4:
5231 ExpectedVal = 4;
5232 break;
5233 case MCK__35_48:
5234 ExpectedVal = 48;
5235 break;
5236 case MCK__35_6:
5237 ExpectedVal = 6;
5238 break;
5239 case MCK__35_64:
5240 ExpectedVal = 64;
5241 break;
5242 case MCK__35_8:
5243 ExpectedVal = 8;
5244 break;
5245 }
5246 if (!Op.isImm())
5247 return Match_InvalidOperand;
5248 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5249 if (!CE)
5250 return Match_InvalidOperand;
5251 if (CE->getValue() == ExpectedVal)
5252 return Match_Success;
5253 return Match_InvalidOperand;
5254}
5255
5256OperandMatchResultTy
5257AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5258
5259 SMLoc S = getLoc();
5260
5261 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5262 Error(S, "expected register");
5263 return MatchOperand_ParseFail;
5264 }
5265
5266 unsigned FirstReg;
5267 OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5268 if (Res != MatchOperand_Success)
5269 return MatchOperand_ParseFail;
5270
5271 const MCRegisterClass &WRegClass =
5272 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5273 const MCRegisterClass &XRegClass =
5274 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5275
5276 bool isXReg = XRegClass.contains(FirstReg),
5277 isWReg = WRegClass.contains(FirstReg);
5278 if (!isXReg && !isWReg) {
5279 Error(S, "expected first even register of a "
5280 "consecutive same-size even/odd register pair");
5281 return MatchOperand_ParseFail;
5282 }
5283
5284 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5285 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5286
5287 if (FirstEncoding & 0x1) {
5288 Error(S, "expected first even register of a "
5289 "consecutive same-size even/odd register pair");
5290 return MatchOperand_ParseFail;
5291 }
5292
5293 if (getParser().getTok().isNot(AsmToken::Comma)) {
5294 Error(getLoc(), "expected comma");
5295 return MatchOperand_ParseFail;
5296 }
5297 // Eat the comma
5298 getParser().Lex();
5299
5300 SMLoc E = getLoc();
5301 unsigned SecondReg;
5302 Res = tryParseScalarRegister(SecondReg);
5303 if (Res != MatchOperand_Success)
5304 return MatchOperand_ParseFail;
5305
5306 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5307 (isXReg && !XRegClass.contains(SecondReg)) ||
5308 (isWReg && !WRegClass.contains(SecondReg))) {
5309 Error(E,"expected second odd register of a "
5310 "consecutive same-size even/odd register pair");
5311 return MatchOperand_ParseFail;
5312 }
5313
5314 unsigned Pair = 0;
5315 if (isXReg) {
5316 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5317 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5318 } else {
5319 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5320 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5321 }
5322
5323 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5324 getLoc(), getContext()));
5325
5326 return MatchOperand_Success;
5327}
5328
5329template <bool ParseShiftExtend, bool ParseSuffix>
5330OperandMatchResultTy
5331AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5332 const SMLoc S = getLoc();
5333 // Check for a SVE vector register specifier first.
5334 unsigned RegNum;
5335 StringRef Kind;
5336
5337 OperandMatchResultTy Res =
5338 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5339
5340 if (Res != MatchOperand_Success)
5341 return Res;
5342
5343 if (ParseSuffix && Kind.empty())
5344 return MatchOperand_NoMatch;
5345
5346 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5347 if (!KindRes)
5348 return MatchOperand_NoMatch;
5349
5350 unsigned ElementWidth = KindRes->second;
5351
5352 // No shift/extend is the default.
5353 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5354 Operands.push_back(AArch64Operand::CreateVectorReg(
5355 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5356
5357 OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5358 if (Res == MatchOperand_ParseFail)
5359 return MatchOperand_ParseFail;
5360 return MatchOperand_Success;
5361 }
5362
5363 // Eat the comma
5364 getParser().Lex();
5365
5366 // Match the shift
5367 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5368 Res = tryParseOptionalShiftExtend(ExtOpnd);
5369 if (Res != MatchOperand_Success)
5370 return Res;
5371
5372 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5373 Operands.push_back(AArch64Operand::CreateVectorReg(
5374 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5375 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5376 Ext->hasShiftExtendAmount()));
5377
5378 return MatchOperand_Success;
5379}
5380
5381OperandMatchResultTy
5382AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5383 MCAsmParser &Parser = getParser();
5384
5385 SMLoc SS = getLoc();
5386 const AsmToken &TokE = Parser.getTok();
5387 bool IsHash = TokE.is(AsmToken::Hash);
5388
5389 if (!IsHash && TokE.isNot(AsmToken::Identifier))
5390 return MatchOperand_NoMatch;
5391
5392 int64_t Pattern;
5393 if (IsHash) {
5394 Parser.Lex(); // Eat hash
5395
5396 // Parse the immediate operand.
5397 const MCExpr *ImmVal;
5398 SS = getLoc();
5399 if (Parser.parseExpression(ImmVal))
5400 return MatchOperand_ParseFail;
5401
5402 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5403 if (!MCE)
5404 return MatchOperand_ParseFail;
5405
5406 Pattern = MCE->getValue();
5407 } else {
5408 // Parse the pattern
5409 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5410 if (!Pat)
5411 return MatchOperand_NoMatch;
5412
5413 Parser.Lex();
5414 Pattern = Pat->Encoding;
5415 assert(Pattern >= 0 && Pattern < 32)(static_cast <bool> (Pattern >= 0 && Pattern
< 32) ? void (0) : __assert_fail ("Pattern >= 0 && Pattern < 32"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5415, __extension__ __PRETTY_FUNCTION__))
;
5416 }
5417
5418 Operands.push_back(
5419 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5420 SS, getLoc(), getContext()));
5421
5422 return MatchOperand_Success;
5423}