Bug Summary

File: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 2661, column 31
The left operand of '!=' is a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-9/lib/clang/9.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-9~svn358520/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-9~svn358520/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-9~svn358520/build-llvm/include -I /build/llvm-toolchain-snapshot-9~svn358520/include -I /build/llvm-toolchain-snapshot-9~svn358520/build-llvm/lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/.. 
-U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/9.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-9/lib/clang/9.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-9~svn358520/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-9~svn358520=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2019-04-17-050842-1547-1 -x c++ /build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp -faddrsig
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "MCTargetDesc/AArch64AddressingModes.h"
10#include "MCTargetDesc/AArch64MCExpr.h"
11#include "MCTargetDesc/AArch64MCTargetDesc.h"
12#include "MCTargetDesc/AArch64TargetStreamer.h"
13#include "AArch64InstrInfo.h"
14#include "Utils/AArch64BaseInfo.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/StringExtras.h"
21#include "llvm/ADT/StringMap.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/StringSwitch.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/MC/MCContext.h"
26#include "llvm/MC/MCExpr.h"
27#include "llvm/MC/MCInst.h"
28#include "llvm/MC/MCLinkerOptimizationHint.h"
29#include "llvm/MC/MCObjectFileInfo.h"
30#include "llvm/MC/MCParser/MCAsmLexer.h"
31#include "llvm/MC/MCParser/MCAsmParser.h"
32#include "llvm/MC/MCParser/MCAsmParserExtension.h"
33#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34#include "llvm/MC/MCParser/MCTargetAsmParser.h"
35#include "llvm/MC/MCRegisterInfo.h"
36#include "llvm/MC/MCStreamer.h"
37#include "llvm/MC/MCSubtargetInfo.h"
38#include "llvm/MC/MCSymbol.h"
39#include "llvm/MC/MCTargetOptions.h"
40#include "llvm/MC/SubtargetFeature.h"
41#include "llvm/MC/MCValue.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/Compiler.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MathExtras.h"
46#include "llvm/Support/SMLoc.h"
47#include "llvm/Support/TargetParser.h"
48#include "llvm/Support/TargetRegistry.h"
49#include "llvm/Support/raw_ostream.h"
50#include <cassert>
51#include <cctype>
52#include <cstdint>
53#include <cstdio>
54#include <string>
55#include <tuple>
56#include <utility>
57#include <vector>
58
59using namespace llvm;
60
61namespace {
62
// The kinds of register a parsed register name may denote. Used to
// disambiguate scalar registers from NEON vectors and from SVE data
// and predicate vector registers during operand parsing.
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector
};
69
// How a parsed register is allowed to relate to the register class the
// matcher expects: it must be the same register, or its super-register,
// or its sub-register (e.g. for GPR64as32 / GPR32as64 operands).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
75
76class AArch64AsmParser : public MCTargetAsmParser {
77private:
78 StringRef Mnemonic; ///< Instruction mnemonic.
79
80 // Map of register aliases registers via the .req directive.
81 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
82
83 class PrefixInfo {
84 public:
85 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
86 PrefixInfo Prefix;
87 switch (Inst.getOpcode()) {
88 case AArch64::MOVPRFX_ZZ:
89 Prefix.Active = true;
90 Prefix.Dst = Inst.getOperand(0).getReg();
91 break;
92 case AArch64::MOVPRFX_ZPmZ_B:
93 case AArch64::MOVPRFX_ZPmZ_H:
94 case AArch64::MOVPRFX_ZPmZ_S:
95 case AArch64::MOVPRFX_ZPmZ_D:
96 Prefix.Active = true;
97 Prefix.Predicated = true;
98 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
99 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 100, __PRETTY_FUNCTION__))
100 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 100, __PRETTY_FUNCTION__))
;
101 Prefix.Dst = Inst.getOperand(0).getReg();
102 Prefix.Pg = Inst.getOperand(2).getReg();
103 break;
104 case AArch64::MOVPRFX_ZPzZ_B:
105 case AArch64::MOVPRFX_ZPzZ_H:
106 case AArch64::MOVPRFX_ZPzZ_S:
107 case AArch64::MOVPRFX_ZPzZ_D:
108 Prefix.Active = true;
109 Prefix.Predicated = true;
110 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
111 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 112, __PRETTY_FUNCTION__))
112 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 112, __PRETTY_FUNCTION__))
;
113 Prefix.Dst = Inst.getOperand(0).getReg();
114 Prefix.Pg = Inst.getOperand(1).getReg();
115 break;
116 default:
117 break;
118 }
119
120 return Prefix;
121 }
122
123 PrefixInfo() : Active(false), Predicated(false) {}
124 bool isActive() const { return Active; }
125 bool isPredicated() const { return Predicated; }
126 unsigned getElementSize() const {
127 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 127, __PRETTY_FUNCTION__))
;
128 return ElementSize;
129 }
130 unsigned getDstReg() const { return Dst; }
131 unsigned getPgReg() const {
132 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 132, __PRETTY_FUNCTION__))
;
133 return Pg;
134 }
135
136 private:
137 bool Active;
138 bool Predicated;
139 unsigned ElementSize;
140 unsigned Dst;
141 unsigned Pg;
142 } NextPrefix;
143
144 AArch64TargetStreamer &getTargetStreamer() {
145 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
146 return static_cast<AArch64TargetStreamer &>(TS);
147 }
148
149 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
150
151 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
152 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
153 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
154 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
155 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
156 bool parseRegister(OperandVector &Operands);
157 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
158 bool parseNeonVectorList(OperandVector &Operands);
159 bool parseOptionalMulOperand(OperandVector &Operands);
160 bool parseOperand(OperandVector &Operands, bool isCondCode,
161 bool invertCondCode);
162
163 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
164 OperandVector &Operands);
165
166 bool parseDirectiveArch(SMLoc L);
167 bool parseDirectiveArchExtension(SMLoc L);
168 bool parseDirectiveCPU(SMLoc L);
169 bool parseDirectiveInst(SMLoc L);
170
171 bool parseDirectiveTLSDescCall(SMLoc L);
172
173 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
174 bool parseDirectiveLtorg(SMLoc L);
175
176 bool parseDirectiveReq(StringRef Name, SMLoc L);
177 bool parseDirectiveUnreq(SMLoc L);
178 bool parseDirectiveCFINegateRAState();
179 bool parseDirectiveCFIBKeyFrame();
180
181 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
182 SmallVectorImpl<SMLoc> &Loc);
183 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
184 OperandVector &Operands, MCStreamer &Out,
185 uint64_t &ErrorInfo,
186 bool MatchingInlineAsm) override;
187/// @name Auto-generated Match Functions
188/// {
189
190#define GET_ASSEMBLER_HEADER
191#include "AArch64GenAsmMatcher.inc"
192
193 /// }
194
195 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
196 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
197 RegKind MatchKind);
198 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
199 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
200 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
201 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
202 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
203 template <bool IsSVEPrefetch = false>
204 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
205 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
206 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
207 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
208 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
209 template<bool AddFPZeroAsLiteral>
210 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
211 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
212 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
213 bool tryParseNeonVectorRegister(OperandVector &Operands);
214 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
215 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
216 template <bool ParseShiftExtend,
217 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
218 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
219 template <bool ParseShiftExtend, bool ParseSuffix>
220 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
221 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
222 template <RegKind VectorKind>
223 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
224 bool ExpectMatch = false);
225 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
226
227public:
228 enum AArch64MatchResultTy {
229 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
230#define GET_OPERAND_DIAGNOSTIC_TYPES
231#include "AArch64GenAsmMatcher.inc"
232 };
233 bool IsILP32;
234
235 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
236 const MCInstrInfo &MII, const MCTargetOptions &Options)
237 : MCTargetAsmParser(Options, STI, MII) {
238 IsILP32 = Options.getABIName() == "ilp32";
239 MCAsmParserExtension::Initialize(Parser);
240 MCStreamer &S = getParser().getStreamer();
241 if (S.getTargetStreamer() == nullptr)
242 new AArch64TargetStreamer(S);
243
244 // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
245 // directives as they have the same form and semantics:
246 /// ::= (.hword | .word | .xword ) [ expression (, expression)* ]
247 Parser.addAliasForDirective(".hword", ".2byte");
248 Parser.addAliasForDirective(".word", ".4byte");
249 Parser.addAliasForDirective(".xword", ".8byte");
250
251 // Initialize the set of available features.
252 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
253 }
254
255 bool regsEqual(const MCParsedAsmOperand &Op1,
256 const MCParsedAsmOperand &Op2) const override;
257 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
258 SMLoc NameLoc, OperandVector &Operands) override;
259 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
260 bool ParseDirective(AsmToken DirectiveID) override;
261 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
262 unsigned Kind) override;
263
264 static bool classifySymbolRef(const MCExpr *Expr,
265 AArch64MCExpr::VariantKind &ELFRefKind,
266 MCSymbolRefExpr::VariantKind &DarwinRefKind,
267 int64_t &Addend);
268};
269
270/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
271/// instruction.
272class AArch64Operand : public MCParsedAsmOperand {
273private:
274 enum KindTy {
275 k_Immediate,
276 k_ShiftedImm,
277 k_CondCode,
278 k_Register,
279 k_VectorList,
280 k_VectorIndex,
281 k_Token,
282 k_SysReg,
283 k_SysCR,
284 k_Prefetch,
285 k_ShiftExtend,
286 k_FPImm,
287 k_Barrier,
288 k_PSBHint,
289 k_BTIHint,
290 } Kind;
291
292 SMLoc StartLoc, EndLoc;
293
294 struct TokOp {
295 const char *Data;
296 unsigned Length;
297 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
298 };
299
300 // Separate shift/extend operand.
301 struct ShiftExtendOp {
302 AArch64_AM::ShiftExtendType Type;
303 unsigned Amount;
304 bool HasExplicitAmount;
305 };
306
307 struct RegOp {
308 unsigned RegNum;
309 RegKind Kind;
310 int ElementWidth;
311
312 // The register may be allowed as a different register class,
313 // e.g. for GPR64as32 or GPR32as64.
314 RegConstraintEqualityTy EqualityTy;
315
316 // In some cases the shift/extend needs to be explicitly parsed together
317 // with the register, rather than as a separate operand. This is needed
318 // for addressing modes where the instruction as a whole dictates the
319 // scaling/extend, rather than specific bits in the instruction.
320 // By parsing them as a single operand, we avoid the need to pass an
321 // extra operand in all CodeGen patterns (because all operands need to
322 // have an associated value), and we avoid the need to update TableGen to
323 // accept operands that have no associated bits in the instruction.
324 //
325 // An added benefit of parsing them together is that the assembler
326 // can give a sensible diagnostic if the scaling is not correct.
327 //
328 // The default is 'lsl #0' (HasExplicitAmount = false) if no
329 // ShiftExtend is specified.
330 ShiftExtendOp ShiftExtend;
331 };
332
333 struct VectorListOp {
334 unsigned RegNum;
335 unsigned Count;
336 unsigned NumElements;
337 unsigned ElementWidth;
338 RegKind RegisterKind;
339 };
340
341 struct VectorIndexOp {
342 unsigned Val;
343 };
344
345 struct ImmOp {
346 const MCExpr *Val;
347 };
348
349 struct ShiftedImmOp {
350 const MCExpr *Val;
351 unsigned ShiftAmount;
352 };
353
354 struct CondCodeOp {
355 AArch64CC::CondCode Code;
356 };
357
358 struct FPImmOp {
359 uint64_t Val; // APFloat value bitcasted to uint64_t.
360 bool IsExact; // describes whether parsed value was exact.
361 };
362
363 struct BarrierOp {
364 const char *Data;
365 unsigned Length;
366 unsigned Val; // Not the enum since not all values have names.
367 };
368
369 struct SysRegOp {
370 const char *Data;
371 unsigned Length;
372 uint32_t MRSReg;
373 uint32_t MSRReg;
374 uint32_t PStateField;
375 };
376
377 struct SysCRImmOp {
378 unsigned Val;
379 };
380
381 struct PrefetchOp {
382 const char *Data;
383 unsigned Length;
384 unsigned Val;
385 };
386
387 struct PSBHintOp {
388 const char *Data;
389 unsigned Length;
390 unsigned Val;
391 };
392
393 struct BTIHintOp {
394 const char *Data;
395 unsigned Length;
396 unsigned Val;
397 };
398
399 struct ExtendOp {
400 unsigned Val;
401 };
402
403 union {
404 struct TokOp Tok;
405 struct RegOp Reg;
406 struct VectorListOp VectorList;
407 struct VectorIndexOp VectorIndex;
408 struct ImmOp Imm;
409 struct ShiftedImmOp ShiftedImm;
410 struct CondCodeOp CondCode;
411 struct FPImmOp FPImm;
412 struct BarrierOp Barrier;
413 struct SysRegOp SysReg;
414 struct SysCRImmOp SysCRImm;
415 struct PrefetchOp Prefetch;
416 struct PSBHintOp PSBHint;
417 struct BTIHintOp BTIHint;
418 struct ShiftExtendOp ShiftExtend;
419 };
420
421 // Keep the MCContext around as the MCExprs may need manipulated during
422 // the add<>Operands() calls.
423 MCContext &Ctx;
424
425public:
426 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
427
428 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
429 Kind = o.Kind;
430 StartLoc = o.StartLoc;
431 EndLoc = o.EndLoc;
432 switch (Kind) {
433 case k_Token:
434 Tok = o.Tok;
435 break;
436 case k_Immediate:
437 Imm = o.Imm;
438 break;
439 case k_ShiftedImm:
440 ShiftedImm = o.ShiftedImm;
441 break;
442 case k_CondCode:
443 CondCode = o.CondCode;
444 break;
445 case k_FPImm:
446 FPImm = o.FPImm;
447 break;
448 case k_Barrier:
449 Barrier = o.Barrier;
450 break;
451 case k_Register:
452 Reg = o.Reg;
453 break;
454 case k_VectorList:
455 VectorList = o.VectorList;
456 break;
457 case k_VectorIndex:
458 VectorIndex = o.VectorIndex;
459 break;
460 case k_SysReg:
461 SysReg = o.SysReg;
462 break;
463 case k_SysCR:
464 SysCRImm = o.SysCRImm;
465 break;
466 case k_Prefetch:
467 Prefetch = o.Prefetch;
468 break;
469 case k_PSBHint:
470 PSBHint = o.PSBHint;
471 break;
472 case k_BTIHint:
473 BTIHint = o.BTIHint;
474 break;
475 case k_ShiftExtend:
476 ShiftExtend = o.ShiftExtend;
477 break;
478 }
479 }
480
481 /// getStartLoc - Get the location of the first token of this operand.
482 SMLoc getStartLoc() const override { return StartLoc; }
483 /// getEndLoc - Get the location of the last token of this operand.
484 SMLoc getEndLoc() const override { return EndLoc; }
485
486 StringRef getToken() const {
487 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 487, __PRETTY_FUNCTION__))
;
488 return StringRef(Tok.Data, Tok.Length);
489 }
490
491 bool isTokenSuffix() const {
492 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 492, __PRETTY_FUNCTION__))
;
493 return Tok.IsSuffix;
494 }
495
496 const MCExpr *getImm() const {
497 assert(Kind == k_Immediate && "Invalid access!")((Kind == k_Immediate && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 497, __PRETTY_FUNCTION__))
;
498 return Imm.Val;
499 }
500
501 const MCExpr *getShiftedImmVal() const {
502 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 502, __PRETTY_FUNCTION__))
;
503 return ShiftedImm.Val;
504 }
505
506 unsigned getShiftedImmShift() const {
507 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 507, __PRETTY_FUNCTION__))
;
508 return ShiftedImm.ShiftAmount;
509 }
510
511 AArch64CC::CondCode getCondCode() const {
512 assert(Kind == k_CondCode && "Invalid access!")((Kind == k_CondCode && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 512, __PRETTY_FUNCTION__))
;
513 return CondCode.Code;
514 }
515
516 APFloat getFPImm() const {
517 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 517, __PRETTY_FUNCTION__))
;
518 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
519 }
520
521 bool getFPImmIsExact() const {
522 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 522, __PRETTY_FUNCTION__))
;
523 return FPImm.IsExact;
524 }
525
526 unsigned getBarrier() const {
527 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 527, __PRETTY_FUNCTION__))
;
528 return Barrier.Val;
529 }
530
531 StringRef getBarrierName() const {
532 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 532, __PRETTY_FUNCTION__))
;
533 return StringRef(Barrier.Data, Barrier.Length);
534 }
535
536 unsigned getReg() const override {
537 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 537, __PRETTY_FUNCTION__))
;
538 return Reg.RegNum;
539 }
540
541 RegConstraintEqualityTy getRegEqualityTy() const {
542 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 542, __PRETTY_FUNCTION__))
;
543 return Reg.EqualityTy;
544 }
545
546 unsigned getVectorListStart() const {
547 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 547, __PRETTY_FUNCTION__))
;
548 return VectorList.RegNum;
549 }
550
551 unsigned getVectorListCount() const {
552 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 552, __PRETTY_FUNCTION__))
;
553 return VectorList.Count;
554 }
555
556 unsigned getVectorIndex() const {
557 assert(Kind == k_VectorIndex && "Invalid access!")((Kind == k_VectorIndex && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 557, __PRETTY_FUNCTION__))
;
558 return VectorIndex.Val;
559 }
560
561 StringRef getSysReg() const {
562 assert(Kind == k_SysReg && "Invalid access!")((Kind == k_SysReg && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 562, __PRETTY_FUNCTION__))
;
563 return StringRef(SysReg.Data, SysReg.Length);
564 }
565
566 unsigned getSysCR() const {
567 assert(Kind == k_SysCR && "Invalid access!")((Kind == k_SysCR && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 567, __PRETTY_FUNCTION__))
;
568 return SysCRImm.Val;
569 }
570
571 unsigned getPrefetch() const {
572 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 572, __PRETTY_FUNCTION__))
;
573 return Prefetch.Val;
574 }
575
576 unsigned getPSBHint() const {
577 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 577, __PRETTY_FUNCTION__))
;
578 return PSBHint.Val;
579 }
580
581 StringRef getPSBHintName() const {
582 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 582, __PRETTY_FUNCTION__))
;
583 return StringRef(PSBHint.Data, PSBHint.Length);
584 }
585
586 unsigned getBTIHint() const {
587 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 587, __PRETTY_FUNCTION__))
;
588 return BTIHint.Val;
589 }
590
591 StringRef getBTIHintName() const {
592 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 592, __PRETTY_FUNCTION__))
;
593 return StringRef(BTIHint.Data, BTIHint.Length);
594 }
595
596 StringRef getPrefetchName() const {
597 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 597, __PRETTY_FUNCTION__))
;
598 return StringRef(Prefetch.Data, Prefetch.Length);
599 }
600
601 AArch64_AM::ShiftExtendType getShiftExtendType() const {
602 if (Kind == k_ShiftExtend)
603 return ShiftExtend.Type;
604 if (Kind == k_Register)
605 return Reg.ShiftExtend.Type;
606 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 606)
;
607 }
608
609 unsigned getShiftExtendAmount() const {
610 if (Kind == k_ShiftExtend)
611 return ShiftExtend.Amount;
612 if (Kind == k_Register)
613 return Reg.ShiftExtend.Amount;
614 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 614)
;
615 }
616
617 bool hasShiftExtendAmount() const {
618 if (Kind == k_ShiftExtend)
619 return ShiftExtend.HasExplicitAmount;
620 if (Kind == k_Register)
621 return Reg.ShiftExtend.HasExplicitAmount;
622 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 622)
;
623 }
624
625 bool isImm() const override { return Kind == k_Immediate; }
626 bool isMem() const override { return false; }
627
628 bool isUImm6() const {
629 if (!isImm())
630 return false;
631 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
632 if (!MCE)
633 return false;
634 int64_t Val = MCE->getValue();
635 return (Val >= 0 && Val < 64);
636 }
637
638 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
639
640 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
641 return isImmScaled<Bits, Scale>(true);
642 }
643
644 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
645 return isImmScaled<Bits, Scale>(false);
646 }
647
648 template <int Bits, int Scale>
649 DiagnosticPredicate isImmScaled(bool Signed) const {
650 if (!isImm())
651 return DiagnosticPredicateTy::NoMatch;
652
653 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
654 if (!MCE)
655 return DiagnosticPredicateTy::NoMatch;
656
657 int64_t MinVal, MaxVal;
658 if (Signed) {
659 int64_t Shift = Bits - 1;
660 MinVal = (int64_t(1) << Shift) * -Scale;
661 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
662 } else {
663 MinVal = 0;
664 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
665 }
666
667 int64_t Val = MCE->getValue();
668 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
669 return DiagnosticPredicateTy::Match;
670
671 return DiagnosticPredicateTy::NearMatch;
672 }
673
674 DiagnosticPredicate isSVEPattern() const {
675 if (!isImm())
676 return DiagnosticPredicateTy::NoMatch;
677 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
678 if (!MCE)
679 return DiagnosticPredicateTy::NoMatch;
680 int64_t Val = MCE->getValue();
681 if (Val >= 0 && Val < 32)
682 return DiagnosticPredicateTy::Match;
683 return DiagnosticPredicateTy::NearMatch;
684 }
685
686 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
687 AArch64MCExpr::VariantKind ELFRefKind;
688 MCSymbolRefExpr::VariantKind DarwinRefKind;
689 int64_t Addend;
690 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
691 Addend)) {
692 // If we don't understand the expression, assume the best and
693 // let the fixup and relocation code deal with it.
694 return true;
695 }
696
697 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
698 ELFRefKind == AArch64MCExpr::VK_LO12 ||
699 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
700 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
701 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
702 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
703 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
704 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
705 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
706 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
707 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
708 // Note that we don't range-check the addend. It's adjusted modulo page
709 // size when converted, so there is no "out of range" condition when using
710 // @pageoff.
711 return true;
712 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
713 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
714 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
715 return Addend == 0;
716 }
717
718 return false;
719 }
720
721 template <int Scale> bool isUImm12Offset() const {
722 if (!isImm())
723 return false;
724
725 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726 if (!MCE)
727 return isSymbolicUImm12Offset(getImm());
728
729 int64_t Val = MCE->getValue();
730 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
731 }
732
733 template <int N, int M>
734 bool isImmInRange() const {
735 if (!isImm())
736 return false;
737 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
738 if (!MCE)
739 return false;
740 int64_t Val = MCE->getValue();
741 return (Val >= N && Val <= M);
742 }
743
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // The written value must be representable in T under either a signed
    // or an unsigned interpretation: narrow it to T both ways and require
    // at least one round-trip to reproduce the original 64-bit value.
    int64_t SVal = typename std::make_signed<T>::type(Val);
    int64_t UVal = typename std::make_unsigned<T>::type(Val);
    if (Val != SVal && Val != UVal)
      return false;

    return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
  }
762
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already-shifted form: only usable when the written shift equals Width.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Plain immediate: fold it into (Val >> Width, Width) when the low
    // Width bits are zero and Val is non-zero; otherwise report it
    // unshifted. The uint64_t casts compare the round-trip bit pattern.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
785
  /// True if this operand can serve as the immediate of an ADD/SUB
  /// (immediate) instruction: an optionally 'lsl #12'-shifted constant in
  /// [0, 0xfff], one of the low-12-bit relocation modifiers, or an
  /// unclassifiable expression deferred to fixup processing.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic form: accept exactly the modifiers whose fixups produce a
    // 12-bit (possibly HI12-shifted) field.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
830
831 bool isAddSubImmNeg() const {
832 if (!isShiftedImm() && !isImm())
833 return false;
834
835 // Otherwise it should be a real negative immediate in range.
836 if (auto ShiftedVal = getShiftedVal<12>())
837 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
838
839 return false;
840 }
841
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // A byte-sized CPY has no 'lsl #8' form, so reject a non-zero shift
    // when T is 8 bits wide.
    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
862
863 // Unsigned value in the range 0 to 255. For element widths of
864 // 16 bits or higher it may also be a signed multiple of 256 in the
865 // range 0 to 65280.
866 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
867 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
868 return DiagnosticPredicateTy::NoMatch;
869
870 bool IsByte =
871 std::is_same<int8_t, typename std::make_signed<T>::type>::value;
872 if (auto ShiftedImm = getShiftedVal<8>())
873 if (!(IsByte && ShiftedImm->second) &&
874 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
875 << ShiftedImm->second))
876 return DiagnosticPredicateTy::Match;
877
878 return DiagnosticPredicateTy::NearMatch;
879 }
880
881 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
882 if (isLogicalImm<T>() && !isSVECpyImm<T>())
883 return DiagnosticPredicateTy::Match;
884 return DiagnosticPredicateTy::NoMatch;
885 }
886
887 bool isCondCode() const { return Kind == k_CondCode; }
888
889 bool isSIMDImmType10() const {
890 if (!isImm())
891 return false;
892 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
893 if (!MCE)
894 return false;
895 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
896 }
897
  /// True if this immediate can be a branch target with an N-bit,
  /// word-aligned displacement field. Non-constant expressions are
  /// accepted and deferred to fixup processing.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch offsets are encoded in units of 4 bytes.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    // Signed N-bit field scaled by 4: [-2^(N-1)*4, (2^(N-1)-1)*4].
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
911
912 bool
913 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
914 if (!isImm())
915 return false;
916
917 AArch64MCExpr::VariantKind ELFRefKind;
918 MCSymbolRefExpr::VariantKind DarwinRefKind;
919 int64_t Addend;
920 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
921 DarwinRefKind, Addend)) {
922 return false;
923 }
924 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
925 return false;
926
927 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
928 if (ELFRefKind == AllowedModifiers[i])
929 return true;
930 }
931
932 return false;
933 }
934
  // The isMov[ZK]SymbolG<n> predicates below accept the relocation
  // modifiers that are legal for MOVZ/MOVK at each 16-bit chunk (G3..G0);
  // the _NC ("no check") variants are the MOVK forms.
  bool isMovZSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovZSymbolG2() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
                         AArch64MCExpr::VK_TPREL_G2,
                         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovZSymbolG1() const {
    return isMovWSymbol({
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    });
  }

  bool isMovZSymbolG0() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
                         AArch64MCExpr::VK_TPREL_G0,
                         AArch64MCExpr::VK_DTPREL_G0});
  }

  bool isMovKSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovKSymbolG2() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
  }

  bool isMovKSymbolG1() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
                         AArch64MCExpr::VK_TPREL_G1_NC,
                         AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovKSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
978
979 template<int RegWidth, int Shift>
980 bool isMOVZMovAlias() const {
981 if (!isImm()) return false;
982
983 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
984 if (!CE) return false;
985 uint64_t Value = CE->getValue();
986
987 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
988 }
989
990 template<int RegWidth, int Shift>
991 bool isMOVNMovAlias() const {
992 if (!isImm()) return false;
993
994 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
995 if (!CE) return false;
996 uint64_t Value = CE->getValue();
997
998 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
999 }
1000
  // Only FP constants representable in the 8-bit FMOV immediate encoding
  // match; getFP64Imm returns -1 for anything else.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
1008
  // -1U is the "no such encoding" sentinel stored by the sysreg lookup.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE fields whose MSR form takes a 1-bit immediate.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  // All remaining PSTATE fields take a 4-bit immediate; the 1-bit fields
  // are excluded so the two operand classes stay disjoint in the matcher.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
1032
  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // Restricted to V0-V15 (the FPR128_lo class), as required by some
  // by-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
1050
1051 template <unsigned Class> bool isSVEVectorReg() const {
1052 RegKind RK;
1053 switch (Class) {
1054 case AArch64::ZPRRegClassID:
1055 case AArch64::ZPR_3bRegClassID:
1056 case AArch64::ZPR_4bRegClassID:
1057 RK = RegKind::SVEDataVector;
1058 break;
1059 case AArch64::PPRRegClassID:
1060 case AArch64::PPR_3bRegClassID:
1061 RK = RegKind::SVEPredicateVector;
1062 break;
1063 default:
1064 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1064)
;
1065 }
1066
1067 return (Kind == k_Register && Reg.Kind == RK) &&
1068 AArch64MCRegisterClasses[Class].contains(getReg());
1069 }
1070
  // An FPR (B/H/S/D/Q) register written where a ZPR is expected; the
  // matcher renames it via addFPRasZPRRegOperands.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1075
  /// Predicate register of the given element width. An ElementWidth of 0
  /// means "any width" (an untyped predicate is acceptable).
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() &&
        (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Data vector register of exactly the given element width (no untyped
  /// wildcard, unlike the predicate variant).
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1098
  /// Data vector register used as a scaled-offset index: the vector must
  /// match, and the shift/extend must equal ShiftExtendTy with an amount
  /// of log2(ShiftWidth/8).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1121
  // A 64-bit GPR written where the instruction encodes its 32-bit half
  // (and vice versa); the add*Operands helpers perform the renaming.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Even/odd sequential register pairs (used by CASP and similar).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1143
1144 template<int64_t Angle, int64_t Remainder>
1145 DiagnosticPredicate isComplexRotation() const {
1146 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1147
1148 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1149 if (!CE) return DiagnosticPredicateTy::NoMatch;
1150 uint64_t Value = CE->getValue();
1151
1152 if (Value % Angle == Remainder && Value <= 270)
1153 return DiagnosticPredicateTy::Match;
1154 return DiagnosticPredicateTy::NearMatch;
1155 }
1156
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  /// 64-bit GPR carrying an LSL whose amount matches the access size
  /// (ExtWidth bits), as used for scaled register offsets.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1172
1173 /// Is this a vector list with the type implicit (presumably attached to the
1174 /// instruction itself)?
1175 template <RegKind VectorKind, unsigned NumRegs>
1176 bool isImplicitlyTypedVectorList() const {
1177 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1178 VectorList.NumElements == 0 &&
1179 VectorList.RegisterKind == VectorKind;
1180 }
1181
1182 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1183 unsigned ElementWidth>
1184 bool isTypedVectorList() const {
1185 if (Kind != k_VectorList)
1186 return false;
1187 if (VectorList.Count != NumRegs)
1188 return false;
1189 if (VectorList.RegisterKind != VectorKind)
1190 return false;
1191 if (VectorList.ElementWidth != ElementWidth)
1192 return false;
1193 return VectorList.NumElements == NumElements;
1194 }
1195
1196 template <int Min, int Max>
1197 DiagnosticPredicate isVectorIndex() const {
1198 if (Kind != k_VectorIndex)
1199 return DiagnosticPredicateTy::NoMatch;
1200 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1201 return DiagnosticPredicateTy::Match;
1202 return DiagnosticPredicateTy::NearMatch;
1203 }
1204
  // Simple kind predicates for the remaining operand flavours.
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1215 bool isShifter() const {
1216 if (!isShiftExtend())
1217 return false;
1218
1219 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1220 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1221 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1222 ST == AArch64_AM::MSL);
1223 }
1224
  /// Match this FP immediate against one of the tabulated "exact" FP
  /// immediates; ImmEnum selects the table entry.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value. The table strings are exact by
      // construction, so a failed conversion indicates a broken table.
      APFloat RealVal(APFloat::IEEEdouble());
      if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
          APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1246
1247 template <unsigned ImmA, unsigned ImmB>
1248 DiagnosticPredicate isExactFPImm() const {
1249 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1250 if ((Res = isExactFPImm<ImmA>()))
1251 return DiagnosticPredicateTy::Match;
1252 if ((Res = isExactFPImm<ImmB>()))
1253 return DiagnosticPredicateTy::Match;
1254 return Res;
1255 }
1256
1257 bool isExtend() const {
1258 if (!isShiftExtend())
1259 return false;
1260
1261 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1262 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1263 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1264 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1265 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1266 ET == AArch64_AM::LSL) &&
1267 getShiftExtendAmount() <= 4;
1268 }
1269
  // Extend of a 32-bit source register: anything except uxtx/sxtx.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }

  // Extend of a 64-bit source register: only uxtx/sxtx/lsl are legal.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1286
  // Memory-offset extend for a 64-bit index register: lsl/sxtx with an
  // amount of 0 or log2(access size in bytes).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Memory-offset extend for a 32-bit index register: uxtw/sxtw with an
  // amount of 0 or log2(access size in bytes).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1304
1305 template <unsigned width>
1306 bool isArithmeticShifter() const {
1307 if (!isShifter())
1308 return false;
1309
1310 // An arithmetic shifter is LSL, LSR, or ASR.
1311 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1312 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1313 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1314 }
1315
1316 template <unsigned width>
1317 bool isLogicalShifter() const {
1318 if (!isShifter())
1319 return false;
1320
1321 // A logical shifter is LSL, LSR, ASR or ROR.
1322 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1323 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1324 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1325 getShiftExtendAmount() < width;
1326 }
1327
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16. (The code below is what is
    // enforced; the old comment describing 0/16/32/48 belonged to the
    // 64-bit variant.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1339
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48. (The code below is
    // what is enforced; the old "0 or 16" comment belonged to the 32-bit
    // variant.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1351
1352 bool isLogicalVecShifter() const {
1353 if (!isShifter())
1354 return false;
1355
1356 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1357 unsigned Shift = getShiftExtendAmount();
1358 return getShiftExtendType() == AArch64_AM::LSL &&
1359 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1360 }
1361
1362 bool isLogicalVecHalfWordShifter() const {
1363 if (!isLogicalVecShifter())
1364 return false;
1365
1366 // A logical vector shifter is a left shift by 0 or 8.
1367 unsigned Shift = getShiftExtendAmount();
1368 return getShiftExtendType() == AArch64_AM::LSL &&
1369 (Shift == 0 || Shift == 8);
1370 }
1371
  // MSL ("move shifted left") vector shifter used by the MOVI/MVNI msl
  // forms. Note this checks isShiftExtend(), not isShifter().
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // An MSL vector shifter is a left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1381
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1391
1392 bool isAdrpLabel() const {
1393 // Validation was handled during parsing, so we just sanity check that
1394 // something didn't go haywire.
1395 if (!isImm())
1396 return false;
1397
1398 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1399 int64_t Val = CE->getValue();
1400 int64_t Min = - (4096 * (1LL << (21 - 1)));
1401 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1402 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1403 }
1404
1405 return true;
1406 }
1407
1408 bool isAdrLabel() const {
1409 // Validation was handled during parsing, so we just sanity check that
1410 // something didn't go haywire.
1411 if (!isImm())
1412 return false;
1413
1414 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1415 int64_t Val = CE->getValue();
1416 int64_t Min = - (1LL << (21 - 1));
1417 int64_t Max = ((1LL << (21 - 1)) - 1);
1418 return Val >= Min && Val <= Max;
1419 }
1420
1421 return true;
1422 }
1423
1424 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1425 // Add as immediates when possible. Null MCExpr = 0.
1426 if (!Expr)
1427 Inst.addOperand(MCOperand::createImm(0));
1428 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1429 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1430 else
1431 Inst.addOperand(MCOperand::createExpr(Expr));
1432 }
1433
  // Emit this operand's register into Inst (N is the matcher's operand
  // count and must be 1 for a plain register).
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1438
1439 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1440 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1440, __PRETTY_FUNCTION__))
;
1441 assert(((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1442, __PRETTY_FUNCTION__))
1442 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1442, __PRETTY_FUNCTION__))
;
1443
1444 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1445 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1446 RI->getEncodingValue(getReg()));
1447
1448 Inst.addOperand(MCOperand::createReg(Reg));
1449 }
1450
1451 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1452 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1452, __PRETTY_FUNCTION__))
;
1453 assert(((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1454, __PRETTY_FUNCTION__))
1454 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1454, __PRETTY_FUNCTION__))
;
1455
1456 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1457 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1458 RI->getEncodingValue(getReg()));
1459
1460 Inst.addOperand(MCOperand::createReg(Reg));
1461 }
1462
1463 template <int Width>
1464 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1465 unsigned Base;
1466 switch (Width) {
1467 case 8: Base = AArch64::B0; break;
1468 case 16: Base = AArch64::H0; break;
1469 case 32: Base = AArch64::S0; break;
1470 case 64: Base = AArch64::D0; break;
1471 case 128: Base = AArch64::Q0; break;
1472 default:
1473 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1473)
;
1474 }
1475 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1476 }
1477
1478 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1479 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1479, __PRETTY_FUNCTION__))
;
1480 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1481, __PRETTY_FUNCTION__))
1481 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1481, __PRETTY_FUNCTION__))
;
1482 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1483 }
1484
1485 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1486 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1486, __PRETTY_FUNCTION__))
;
1487 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1488, __PRETTY_FUNCTION__))
1488 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1488, __PRETTY_FUNCTION__))
;
1489 Inst.addOperand(MCOperand::createReg(getReg()));
1490 }
1491
1492 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1493 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1493, __PRETTY_FUNCTION__))
;
1494 Inst.addOperand(MCOperand::createReg(getReg()));
1495 }
1496
// Row selector for the FirstRegs table in addVectorListOperands: which
// register family a parsed vector list should be emitted in.
enum VecListIndexType {
  VecListIdx_DReg = 0,
  VecListIdx_QReg = 1,
  VecListIdx_ZReg = 2,
};
1502
1503 template <VecListIndexType RegTy, unsigned NumRegs>
1504 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1505 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1505, __PRETTY_FUNCTION__))
;
1506 static const unsigned FirstRegs[][5] = {
1507 /* DReg */ { AArch64::Q0,
1508 AArch64::D0, AArch64::D0_D1,
1509 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1510 /* QReg */ { AArch64::Q0,
1511 AArch64::Q0, AArch64::Q0_Q1,
1512 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1513 /* ZReg */ { AArch64::Z0,
1514 AArch64::Z0, AArch64::Z0_Z1,
1515 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1516 };
1517
1518 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1519, __PRETTY_FUNCTION__))
1519 " NumRegs must be <= 4 for ZRegs")(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1519, __PRETTY_FUNCTION__))
;
1520
1521 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1522 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1523 FirstRegs[(unsigned)RegTy][0]));
1524 }
1525
1526 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1527 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1527, __PRETTY_FUNCTION__))
;
1528 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1529 }
1530
1531 template <unsigned ImmIs0, unsigned ImmIs1>
1532 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1533 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1533, __PRETTY_FUNCTION__))
;
1534 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")((bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand"
) ? static_cast<void> (0) : __assert_fail ("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1534, __PRETTY_FUNCTION__))
;
1535 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1536 }
1537
1538 void addImmOperands(MCInst &Inst, unsigned N) const {
1539 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1539, __PRETTY_FUNCTION__))
;
1540 // If this is a pageoff symrefexpr with an addend, adjust the addend
1541 // to be only the page-offset portion. Otherwise, just add the expr
1542 // as-is.
1543 addExpr(Inst, getImm());
1544 }
1545
1546 template <int Shift>
1547 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1548 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1548, __PRETTY_FUNCTION__))
;
1549 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1550 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1551 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1552 } else if (isShiftedImm()) {
1553 addExpr(Inst, getShiftedImmVal());
1554 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1555 } else {
1556 addExpr(Inst, getImm());
1557 Inst.addOperand(MCOperand::createImm(0));
1558 }
1559 }
1560
1561 template <int Shift>
1562 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1563 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1563, __PRETTY_FUNCTION__))
;
1564 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1565 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1566 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1567 } else
1568 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1568)
;
1569 }
1570
1571 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1572 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1572, __PRETTY_FUNCTION__))
;
1573 Inst.addOperand(MCOperand::createImm(getCondCode()));
1574 }
1575
1576 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1577 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1577, __PRETTY_FUNCTION__))
;
1578 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1579 if (!MCE)
1580 addExpr(Inst, getImm());
1581 else
1582 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1583 }
1584
1585 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1586 addImmOperands(Inst, N);
1587 }
1588
1589 template<int Scale>
1590 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1591 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1591, __PRETTY_FUNCTION__))
;
1592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1593
1594 if (!MCE) {
1595 Inst.addOperand(MCOperand::createExpr(getImm()));
1596 return;
1597 }
1598 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1599 }
1600
1601 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1602 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1602, __PRETTY_FUNCTION__))
;
1603 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1604 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1605 }
1606
1607 template <int Scale>
1608 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1609 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1609, __PRETTY_FUNCTION__))
;
1610 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1611 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1612 }
1613
1614 template <typename T>
1615 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1616 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1616, __PRETTY_FUNCTION__))
;
1617 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1618 typename std::make_unsigned<T>::type Val = MCE->getValue();
1619 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1620 Inst.addOperand(MCOperand::createImm(encoding));
1621 }
1622
1623 template <typename T>
1624 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1625 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1625, __PRETTY_FUNCTION__))
;
1626 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1627 typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1628 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1629 Inst.addOperand(MCOperand::createImm(encoding));
1630 }
1631
1632 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1633 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1633, __PRETTY_FUNCTION__))
;
1634 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1635 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1636 Inst.addOperand(MCOperand::createImm(encoding));
1637 }
1638
1639 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1640 // Branch operands don't encode the low bits, so shift them off
1641 // here. If it's a label, however, just put it on directly as there's
1642 // not enough information now to do anything.
1643 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1643, __PRETTY_FUNCTION__))
;
1644 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1645 if (!MCE) {
1646 addExpr(Inst, getImm());
1647 return;
1648 }
1649 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1649, __PRETTY_FUNCTION__))
;
1650 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1651 }
1652
1653 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1654 // Branch operands don't encode the low bits, so shift them off
1655 // here. If it's a label, however, just put it on directly as there's
1656 // not enough information now to do anything.
1657 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1657, __PRETTY_FUNCTION__))
;
1658 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1659 if (!MCE) {
1660 addExpr(Inst, getImm());
1661 return;
1662 }
1663 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1663, __PRETTY_FUNCTION__))
;
1664 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1665 }
1666
1667 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1668 // Branch operands don't encode the low bits, so shift them off
1669 // here. If it's a label, however, just put it on directly as there's
1670 // not enough information now to do anything.
1671 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1671, __PRETTY_FUNCTION__))
;
1672 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1673 if (!MCE) {
1674 addExpr(Inst, getImm());
1675 return;
1676 }
1677 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1677, __PRETTY_FUNCTION__))
;
1678 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1679 }
1680
1681 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1682 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1682, __PRETTY_FUNCTION__))
;
1683 Inst.addOperand(MCOperand::createImm(
1684 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1685 }
1686
1687 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1688 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1688, __PRETTY_FUNCTION__))
;
1689 Inst.addOperand(MCOperand::createImm(getBarrier()));
1690 }
1691
1692 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1693 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1693, __PRETTY_FUNCTION__))
;
1694
1695 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1696 }
1697
1698 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1699 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1699, __PRETTY_FUNCTION__))
;
1700
1701 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1702 }
1703
1704 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1705 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1705, __PRETTY_FUNCTION__))
;
1706
1707 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1708 }
1709
1710 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1711 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1711, __PRETTY_FUNCTION__))
;
1712
1713 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1714 }
1715
1716 void addSysCROperands(MCInst &Inst, unsigned N) const {
1717 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1717, __PRETTY_FUNCTION__))
;
1718 Inst.addOperand(MCOperand::createImm(getSysCR()));
1719 }
1720
1721 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1722 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1722, __PRETTY_FUNCTION__))
;
1723 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1724 }
1725
1726 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1727 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1727, __PRETTY_FUNCTION__))
;
1728 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1729 }
1730
1731 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1732 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1732, __PRETTY_FUNCTION__))
;
1733 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1734 }
1735
1736 void addShifterOperands(MCInst &Inst, unsigned N) const {
1737 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1737, __PRETTY_FUNCTION__))
;
1738 unsigned Imm =
1739 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1740 Inst.addOperand(MCOperand::createImm(Imm));
1741 }
1742
1743 void addExtendOperands(MCInst &Inst, unsigned N) const {
1744 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1744, __PRETTY_FUNCTION__))
;
1745 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1746 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1747 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1748 Inst.addOperand(MCOperand::createImm(Imm));
1749 }
1750
1751 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1752 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1752, __PRETTY_FUNCTION__))
;
1753 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1754 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1755 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1756 Inst.addOperand(MCOperand::createImm(Imm));
1757 }
1758
1759 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1760 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1760, __PRETTY_FUNCTION__))
;
1761 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1762 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1763 Inst.addOperand(MCOperand::createImm(IsSigned));
1764 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1765 }
1766
1767 // For 8-bit load/store instructions with a register offset, both the
1768 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1769 // they're disambiguated by whether the shift was explicit or implicit rather
1770 // than its size.
1771 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1772 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1772, __PRETTY_FUNCTION__))
;
1773 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1774 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1775 Inst.addOperand(MCOperand::createImm(IsSigned));
1776 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1777 }
1778
1779 template<int Shift>
1780 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1781 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1781, __PRETTY_FUNCTION__))
;
1782
1783 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1784 uint64_t Value = CE->getValue();
1785 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1786 }
1787
1788 template<int Shift>
1789 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1790 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1790, __PRETTY_FUNCTION__))
;
1791
1792 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1793 uint64_t Value = CE->getValue();
1794 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1795 }
1796
1797 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1798 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1798, __PRETTY_FUNCTION__))
;
1799 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1800 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1801 }
1802
1803 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1804 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1804, __PRETTY_FUNCTION__))
;
1805 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1806 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1807 }
1808
1809 void print(raw_ostream &OS) const override;
1810
1811 static std::unique_ptr<AArch64Operand>
1812 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1813 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1814 Op->Tok.Data = Str.data();
1815 Op->Tok.Length = Str.size();
1816 Op->Tok.IsSuffix = IsSuffix;
1817 Op->StartLoc = S;
1818 Op->EndLoc = S;
1819 return Op;
1820 }
1821
1822 static std::unique_ptr<AArch64Operand>
1823 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1824 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1825 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1826 unsigned ShiftAmount = 0,
1827 unsigned HasExplicitAmount = false) {
1828 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1829 Op->Reg.RegNum = RegNum;
1830 Op->Reg.Kind = Kind;
1831 Op->Reg.ElementWidth = 0;
1832 Op->Reg.EqualityTy = EqTy;
1833 Op->Reg.ShiftExtend.Type = ExtTy;
1834 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1835 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1836 Op->StartLoc = S;
1837 Op->EndLoc = E;
1838 return Op;
1839 }
1840
1841 static std::unique_ptr<AArch64Operand>
1842 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1843 SMLoc S, SMLoc E, MCContext &Ctx,
1844 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1845 unsigned ShiftAmount = 0,
1846 unsigned HasExplicitAmount = false) {
1847 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1849, __PRETTY_FUNCTION__))
1848 Kind == RegKind::SVEPredicateVector) &&(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1849, __PRETTY_FUNCTION__))
1849 "Invalid vector kind")(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1849, __PRETTY_FUNCTION__))
;
1850 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1851 HasExplicitAmount);
1852 Op->Reg.ElementWidth = ElementWidth;
1853 return Op;
1854 }
1855
1856 static std::unique_ptr<AArch64Operand>
1857 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1858 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1859 MCContext &Ctx) {
1860 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1861 Op->VectorList.RegNum = RegNum;
1862 Op->VectorList.Count = Count;
1863 Op->VectorList.NumElements = NumElements;
1864 Op->VectorList.ElementWidth = ElementWidth;
1865 Op->VectorList.RegisterKind = RegisterKind;
1866 Op->StartLoc = S;
1867 Op->EndLoc = E;
1868 return Op;
1869 }
1870
1871 static std::unique_ptr<AArch64Operand>
1872 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1873 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1874 Op->VectorIndex.Val = Idx;
1875 Op->StartLoc = S;
1876 Op->EndLoc = E;
1877 return Op;
1878 }
1879
1880 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1881 SMLoc E, MCContext &Ctx) {
1882 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1883 Op->Imm.Val = Val;
1884 Op->StartLoc = S;
1885 Op->EndLoc = E;
1886 return Op;
1887 }
1888
1889 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1890 unsigned ShiftAmount,
1891 SMLoc S, SMLoc E,
1892 MCContext &Ctx) {
1893 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1894 Op->ShiftedImm .Val = Val;
1895 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1896 Op->StartLoc = S;
1897 Op->EndLoc = E;
1898 return Op;
1899 }
1900
1901 static std::unique_ptr<AArch64Operand>
1902 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1903 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1904 Op->CondCode.Code = Code;
1905 Op->StartLoc = S;
1906 Op->EndLoc = E;
1907 return Op;
1908 }
1909
1910 static std::unique_ptr<AArch64Operand>
1911 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1912 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1913 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1914 Op->FPImm.IsExact = IsExact;
1915 Op->StartLoc = S;
1916 Op->EndLoc = S;
1917 return Op;
1918 }
1919
1920 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1921 StringRef Str,
1922 SMLoc S,
1923 MCContext &Ctx) {
1924 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1925 Op->Barrier.Val = Val;
1926 Op->Barrier.Data = Str.data();
1927 Op->Barrier.Length = Str.size();
1928 Op->StartLoc = S;
1929 Op->EndLoc = S;
1930 return Op;
1931 }
1932
1933 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1934 uint32_t MRSReg,
1935 uint32_t MSRReg,
1936 uint32_t PStateField,
1937 MCContext &Ctx) {
1938 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1939 Op->SysReg.Data = Str.data();
1940 Op->SysReg.Length = Str.size();
1941 Op->SysReg.MRSReg = MRSReg;
1942 Op->SysReg.MSRReg = MSRReg;
1943 Op->SysReg.PStateField = PStateField;
1944 Op->StartLoc = S;
1945 Op->EndLoc = S;
1946 return Op;
1947 }
1948
1949 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1950 SMLoc E, MCContext &Ctx) {
1951 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1952 Op->SysCRImm.Val = Val;
1953 Op->StartLoc = S;
1954 Op->EndLoc = E;
1955 return Op;
1956 }
1957
1958 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1959 StringRef Str,
1960 SMLoc S,
1961 MCContext &Ctx) {
1962 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1963 Op->Prefetch.Val = Val;
1964 Op->Barrier.Data = Str.data();
1965 Op->Barrier.Length = Str.size();
1966 Op->StartLoc = S;
1967 Op->EndLoc = S;
1968 return Op;
1969 }
1970
1971 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1972 StringRef Str,
1973 SMLoc S,
1974 MCContext &Ctx) {
1975 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1976 Op->PSBHint.Val = Val;
1977 Op->PSBHint.Data = Str.data();
1978 Op->PSBHint.Length = Str.size();
1979 Op->StartLoc = S;
1980 Op->EndLoc = S;
1981 return Op;
1982 }
1983
1984 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1985 StringRef Str,
1986 SMLoc S,
1987 MCContext &Ctx) {
1988 auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
1989 Op->BTIHint.Val = Val << 1 | 32;
1990 Op->BTIHint.Data = Str.data();
1991 Op->BTIHint.Length = Str.size();
1992 Op->StartLoc = S;
1993 Op->EndLoc = S;
1994 return Op;
1995 }
1996
1997 static std::unique_ptr<AArch64Operand>
1998 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1999 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2000 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2001 Op->ShiftExtend.Type = ShOp;
2002 Op->ShiftExtend.Amount = Val;
2003 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2004 Op->StartLoc = S;
2005 Op->EndLoc = E;
2006 return Op;
2007 }
2008};
2009
2010} // end anonymous namespace.
2011
2012void AArch64Operand::print(raw_ostream &OS) const {
2013 switch (Kind) {
2014 case k_FPImm:
2015 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2016 if (!getFPImmIsExact())
2017 OS << " (inexact)";
2018 OS << ">";
2019 break;
2020 case k_Barrier: {
2021 StringRef Name = getBarrierName();
2022 if (!Name.empty())
2023 OS << "<barrier " << Name << ">";
2024 else
2025 OS << "<barrier invalid #" << getBarrier() << ">";
2026 break;
2027 }
2028 case k_Immediate:
2029 OS << *getImm();
2030 break;
2031 case k_ShiftedImm: {
2032 unsigned Shift = getShiftedImmShift();
2033 OS << "<shiftedimm ";
2034 OS << *getShiftedImmVal();
2035 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2036 break;
2037 }
2038 case k_CondCode:
2039 OS << "<condcode " << getCondCode() << ">";
2040 break;
2041 case k_VectorList: {
2042 OS << "<vectorlist ";
2043 unsigned Reg = getVectorListStart();
2044 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2045 OS << Reg + i << " ";
2046 OS << ">";
2047 break;
2048 }
2049 case k_VectorIndex:
2050 OS << "<vectorindex " << getVectorIndex() << ">";
2051 break;
2052 case k_SysReg:
2053 OS << "<sysreg: " << getSysReg() << '>';
2054 break;
2055 case k_Token:
2056 OS << "'" << getToken() << "'";
2057 break;
2058 case k_SysCR:
2059 OS << "c" << getSysCR();
2060 break;
2061 case k_Prefetch: {
2062 StringRef Name = getPrefetchName();
2063 if (!Name.empty())
2064 OS << "<prfop " << Name << ">";
2065 else
2066 OS << "<prfop invalid #" << getPrefetch() << ">";
2067 break;
2068 }
2069 case k_PSBHint:
2070 OS << getPSBHintName();
2071 break;
2072 case k_Register:
2073 OS << "<register " << getReg() << ">";
2074 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2075 break;
2076 LLVM_FALLTHROUGH[[clang::fallthrough]];
2077 case k_BTIHint:
2078 OS << getBTIHintName();
2079 break;
2080 case k_ShiftExtend:
2081 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2082 << getShiftExtendAmount();
2083 if (!hasShiftExtendAmount())
2084 OS << "<imp>";
2085 OS << '>';
2086 break;
2087 }
2088}
2089
2090/// @name Auto-generated Match Functions
2091/// {
2092
2093static unsigned MatchRegisterName(StringRef Name);
2094
2095/// }
2096
/// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
/// corresponding Q-register, or return 0 if Name is not a NEON vector
/// register name. Spellings with leading zeros (e.g. "v01") do not match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2133
2134/// Returns an optional pair of (#elements, element-width) if Suffix
2135/// is a valid vector kind. Where the number of elements in a vector
2136/// or the vector width is implicit or explicitly unknown (but still a
2137/// valid suffix kind), 0 is used.
2138static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2139 RegKind VectorKind) {
2140 std::pair<int, int> Res = {-1, -1};
2141
2142 switch (VectorKind) {
2143 case RegKind::NeonVector:
2144 Res =
2145 StringSwitch<std::pair<int, int>>(Suffix.lower())
2146 .Case("", {0, 0})
2147 .Case(".1d", {1, 64})
2148 .Case(".1q", {1, 128})
2149 // '.2h' needed for fp16 scalar pairwise reductions
2150 .Case(".2h", {2, 16})
2151 .Case(".2s", {2, 32})
2152 .Case(".2d", {2, 64})
2153 // '.4b' is another special case for the ARMv8.2a dot product
2154 // operand
2155 .Case(".4b", {4, 8})
2156 .Case(".4h", {4, 16})
2157 .Case(".4s", {4, 32})
2158 .Case(".8b", {8, 8})
2159 .Case(".8h", {8, 16})
2160 .Case(".16b", {16, 8})
2161 // Accept the width neutral ones, too, for verbose syntax. If those
2162 // aren't used in the right places, the token operand won't match so
2163 // all will work out.
2164 .Case(".b", {0, 8})
2165 .Case(".h", {0, 16})
2166 .Case(".s", {0, 32})
2167 .Case(".d", {0, 64})
2168 .Default({-1, -1});
2169 break;
2170 case RegKind::SVEPredicateVector:
2171 case RegKind::SVEDataVector:
2172 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2173 .Case("", {0, 0})
2174 .Case(".b", {0, 8})
2175 .Case(".h", {0, 16})
2176 .Case(".s", {0, 32})
2177 .Case(".d", {0, 64})
2178 .Case(".q", {0, 128})
2179 .Default({-1, -1});
2180 break;
2181 default:
2182 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2182)
;
2183 }
2184
2185 if (Res == std::make_pair(-1, -1))
2186 return Optional<std::pair<int, int>>();
2187
2188 return Optional<std::pair<int, int>>(Res);
2189}
2190
2191static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2192 return parseVectorKind(Suffix, VectorKind).hasValue();
2193}
2194
/// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
/// its register, or return 0 if Name is not an SVE data vector register.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2231
/// Map an SVE predicate register name ("p0".."p15", case-insensitive) to
/// its register, or return 0 if Name is not an SVE predicate register.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2252
2253bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2254 SMLoc &EndLoc) {
2255 StartLoc = getLoc();
2256 auto Res = tryParseScalarRegister(RegNo);
2257 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2258 return Res != MatchOperand_Success;
2259}
2260
2261// Matches a register name or register alias previously defined by '.req'
2262unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2263 RegKind Kind) {
2264 unsigned RegNum = 0;
2265 if ((RegNum = matchSVEDataVectorRegName(Name)))
2266 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2267
2268 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2269 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2270
2271 if ((RegNum = MatchNeonVectorRegName(Name)))
2272 return Kind == RegKind::NeonVector ? RegNum : 0;
2273
2274 // The parsed register must be of RegKind Scalar
2275 if ((RegNum = MatchRegisterName(Name)))
2276 return Kind == RegKind::Scalar ? RegNum : 0;
2277
2278 if (!RegNum) {
2279 // Handle a few common aliases of registers.
2280 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2281 .Case("fp", AArch64::FP)
2282 .Case("lr", AArch64::LR)
2283 .Case("x31", AArch64::XZR)
2284 .Case("w31", AArch64::WZR)
2285 .Default(0))
2286 return Kind == RegKind::Scalar ? RegNum : 0;
2287
2288 // Check for aliases registered via .req. Canonicalize to lower case.
2289 // That's more consistent since register names are case insensitive, and
2290 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2291 auto Entry = RegisterReqs.find(Name.lower());
2292 if (Entry == RegisterReqs.end())
2293 return 0;
2294
2295 // set RegNum if the match is the right kind of register
2296 if (Kind == Entry->getValue().first)
2297 RegNum = Entry->getValue().second;
2298 }
2299 return RegNum;
2300}
2301
2302/// tryParseScalarRegister - Try to parse a register name. The token must be an
2303/// Identifier when called, and if it is a register name the token is eaten and
2304/// the register is added to the operand list.
2305OperandMatchResultTy
2306AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2307 MCAsmParser &Parser = getParser();
2308 const AsmToken &Tok = Parser.getTok();
2309 if (Tok.isNot(AsmToken::Identifier))
2310 return MatchOperand_NoMatch;
2311
2312 std::string lowerCase = Tok.getString().lower();
2313 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2314 if (Reg == 0)
2315 return MatchOperand_NoMatch;
2316
2317 RegNum = Reg;
2318 Parser.Lex(); // Eat identifier token.
2319 return MatchOperand_Success;
2320}
2321
2322/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2323OperandMatchResultTy
2324AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2325 MCAsmParser &Parser = getParser();
2326 SMLoc S = getLoc();
2327
2328 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2329 Error(S, "Expected cN operand where 0 <= N <= 15");
2330 return MatchOperand_ParseFail;
2331 }
2332
2333 StringRef Tok = Parser.getTok().getIdentifier();
2334 if (Tok[0] != 'c' && Tok[0] != 'C') {
2335 Error(S, "Expected cN operand where 0 <= N <= 15");
2336 return MatchOperand_ParseFail;
2337 }
2338
2339 uint32_t CRNum;
2340 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2341 if (BadNum || CRNum > 15) {
2342 Error(S, "Expected cN operand where 0 <= N <= 15");
2343 return MatchOperand_ParseFail;
2344 }
2345
2346 Parser.Lex(); // Eat identifier token.
2347 Operands.push_back(
2348 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2349 return MatchOperand_Success;
2350}
2351
/// tryParsePrefetch - Try to parse a prefetch operand: either a named
/// prefetch operation or a raw immediate in the encoding range. The SVE and
/// base variants use different name tables and immediate ranges, selected by
/// the template parameter.
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Name -> encoding lookup in the variant-specific table.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Encoding -> canonical name, used to attach a name to numeric operands.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  // SVE prefetch immediates are limited to 15; base AArch64 ones to 31.
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only plain constants are accepted as numeric prefetch operands.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Attach the canonical name, if the encoding has one, for printing.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  return MatchOperand_Success;
}
2421
2422/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2423OperandMatchResultTy
2424AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2425 MCAsmParser &Parser = getParser();
2426 SMLoc S = getLoc();
2427 const AsmToken &Tok = Parser.getTok();
2428 if (Tok.isNot(AsmToken::Identifier)) {
2429 TokError("invalid operand for instruction");
2430 return MatchOperand_ParseFail;
2431 }
2432
2433 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2434 if (!PSB) {
2435 TokError("invalid operand for instruction");
2436 return MatchOperand_ParseFail;
2437 }
2438
2439 Parser.Lex(); // Eat identifier token.
2440 Operands.push_back(AArch64Operand::CreatePSBHint(
2441 PSB->Encoding, Tok.getString(), S, getContext()));
2442 return MatchOperand_Success;
2443}
2444
2445/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2446OperandMatchResultTy
2447AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2448 MCAsmParser &Parser = getParser();
2449 SMLoc S = getLoc();
2450 const AsmToken &Tok = Parser.getTok();
2451 if (Tok.isNot(AsmToken::Identifier)) {
2452 TokError("invalid operand for instruction");
2453 return MatchOperand_ParseFail;
2454 }
2455
2456 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2457 if (!BTI) {
2458 TokError("invalid operand for instruction");
2459 return MatchOperand_ParseFail;
2460 }
2461
2462 Parser.Lex(); // Eat identifier token.
2463 Operands.push_back(AArch64Operand::CreateBTIHint(
2464 BTI->Encoding, Tok.getString(), S, getContext()));
2465 return MatchOperand_Success;
2466}
2467
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr;

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Validate the relocation modifier (if any) against what ADRP accepts.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin @gotpage/@tlvppage references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2518
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr;

  // Leave anything with a bracket to the default for SVE
  if (getParser().getTok().is(AsmToken::LBrac))
    return MatchOperand_NoMatch;

  if (getParser().getTok().is(AsmToken::Hash))
    getParser().Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Unlike ADRP, ADR accepts no @page-style modifiers at all.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      Error(S, "unexpected adr label");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
}
2555
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts either a hex byte (the raw 8-bit FP immediate encoding) or a
/// decimal/real literal, optionally preceded by '#' and '-'.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this is simply not an FP immediate; with one,
    // the '#' committed us, so a non-numeric token is an error.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The hex form is the already-encoded 8-bit value, so it cannot be
    // negated and must fit in a byte.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto Status =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Some callers want +0.0 represented as the literal tokens "#0" ".0".
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, Status == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2608
2609/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2610/// a shift suffix, for example '#1, lsl #12'.
2611OperandMatchResultTy
2612AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2613 MCAsmParser &Parser = getParser();
2614 SMLoc S = getLoc();
2615
2616 if (Parser.getTok().is(AsmToken::Hash))
1
Taking true branch
2617 Parser.Lex(); // Eat '#'
2618 else if (Parser.getTok().isNot(AsmToken::Integer))
2619 // Operand should start from # or should be integer, emit error otherwise.
2620 return MatchOperand_NoMatch;
2621
2622 const MCExpr *Imm;
2
'Imm' declared without an initial value
2623 if (parseSymbolicImmVal(Imm))
3
Calling 'AArch64AsmParser::parseSymbolicImmVal'
8
Returning from 'AArch64AsmParser::parseSymbolicImmVal'
9
Assuming the condition is false
10
Taking false branch
2624 return MatchOperand_ParseFail;
2625 else if (Parser.getTok().isNot(AsmToken::Comma)) {
11
Taking false branch
2626 SMLoc E = Parser.getTok().getLoc();
2627 Operands.push_back(
2628 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2629 return MatchOperand_Success;
2630 }
2631
2632 // Eat ','
2633 Parser.Lex();
2634
2635 // The optional operand must be "lsl #N" where N is non-negative.
2636 if (!Parser.getTok().is(AsmToken::Identifier) ||
13
Taking false branch
2637 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
12
Assuming the condition is false
2638 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2639 return MatchOperand_ParseFail;
2640 }
2641
2642 // Eat 'lsl'
2643 Parser.Lex();
2644
2645 parseOptionalToken(AsmToken::Hash);
2646
2647 if (Parser.getTok().isNot(AsmToken::Integer)) {
14
Taking false branch
2648 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2649 return MatchOperand_ParseFail;
2650 }
2651
2652 int64_t ShiftAmount = Parser.getTok().getIntVal();
2653
2654 if (ShiftAmount < 0) {
15
Taking false branch
2655 Error(Parser.getTok().getLoc(), "positive shift amount required");
2656 return MatchOperand_ParseFail;
2657 }
2658 Parser.Lex(); // Eat the number
2659
2660 // Just in case the optional lsl #0 is used for immediates other than zero.
2661 if (ShiftAmount == 0 && Imm != 0) {
16
Assuming 'ShiftAmount' is equal to 0
17
The left operand of '!=' is a garbage value
2662 SMLoc E = Parser.getTok().getLoc();
2663 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2664 return MatchOperand_Success;
2665 }
2666
2667 SMLoc E = Parser.getTok().getLoc();
2668 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2669 S, E, getContext()));
2670 return MatchOperand_Success;
2671}
2672
/// parseCondCodeString - Parse a Condition Code string, returning
/// AArch64CC::Invalid if the spelling is not recognized.
AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("eq", AArch64CC::EQ)
                    .Case("ne", AArch64CC::NE)
                    .Case("cs", AArch64CC::HS)
                    .Case("hs", AArch64CC::HS)
                    .Case("cc", AArch64CC::LO)
                    .Case("lo", AArch64CC::LO)
                    .Case("mi", AArch64CC::MI)
                    .Case("pl", AArch64CC::PL)
                    .Case("vs", AArch64CC::VS)
                    .Case("vc", AArch64CC::VC)
                    .Case("hi", AArch64CC::HI)
                    .Case("ls", AArch64CC::LS)
                    .Case("ge", AArch64CC::GE)
                    .Case("lt", AArch64CC::LT)
                    .Case("gt", AArch64CC::GT)
                    .Case("le", AArch64CC::LE)
                    .Case("al", AArch64CC::AL)
                    .Case("nv", AArch64CC::NV)
                    .Default(AArch64CC::Invalid);

  // The SVE-specific alternate spellings are only accepted when the SVE
  // feature is enabled; each aliases one of the base condition codes.
  if (CC == AArch64CC::Invalid &&
      getSTI().getFeatureBits()[AArch64::FeatureSVE])
    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("none",  AArch64CC::EQ)
                    .Case("any",   AArch64CC::NE)
                    .Case("nlast", AArch64CC::HS)
                    .Case("last",  AArch64CC::LO)
                    .Case("first", AArch64CC::MI)
                    .Case("nfrst", AArch64CC::PL)
                    .Case("pmore", AArch64CC::HI)
                    .Case("plast", AArch64CC::LS)
                    .Case("tcont", AArch64CC::GE)
                    .Case("tstop", AArch64CC::LT)
                    .Default(AArch64CC::Invalid);

  return CC;
}
2713
2714/// parseCondCode - Parse a Condition Code operand.
2715bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2716 bool invertCondCode) {
2717 MCAsmParser &Parser = getParser();
2718 SMLoc S = getLoc();
2719 const AsmToken &Tok = Parser.getTok();
2720 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")((Tok.is(AsmToken::Identifier) && "Token is not an Identifier"
) ? static_cast<void> (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2720, __PRETTY_FUNCTION__))
;
2721
2722 StringRef Cond = Tok.getString();
2723 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2724 if (CC == AArch64CC::Invalid)
2725 return TokError("invalid condition code");
2726 Parser.Lex(); // Eat identifier token.
2727
2728 if (invertCondCode) {
2729 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2730 return TokError("condition codes AL and NV are invalid for this instruction");
2731 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2732 }
2733
2734 Operands.push_back(
2735 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2736 return false;
2737}
2738
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present. Shift operators (lsl/lsr/asr/ror/msl) require an explicit
/// amount; extend operators (uxtb..sxtx) default to an implicit #0.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a plain constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2812
// Maps ".arch_extension" / feature-string names to the subtarget feature
// bit(s) they enable. Entries with an empty feature set are recognized but
// currently unsupported.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"sve", {AArch64::FeatureSVE}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2840
2841static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2842 if (FBS[AArch64::HasV8_1aOps])
2843 Str += "ARMv8.1a";
2844 else if (FBS[AArch64::HasV8_2aOps])
2845 Str += "ARMv8.2a";
2846 else if (FBS[AArch64::HasV8_3aOps])
2847 Str += "ARMv8.3a";
2848 else if (FBS[AArch64::HasV8_4aOps])
2849 Str += "ARMv8.4a";
2850 else if (FBS[AArch64::HasV8_5aOps])
2851 Str += "ARMv8.5a";
2852 else {
2853 auto ext = std::find_if(std::begin(ExtensionMap),
2854 std::end(ExtensionMap),
2855 [&](const Extension& e)
2856 // Use & in case multiple features are enabled
2857 { return (FBS & e.Features) != FeatureBitset(); }
2858 );
2859
2860 Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2861 }
2862}
2863
2864void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2865 SMLoc S) {
2866 const uint16_t Op2 = Encoding & 7;
2867 const uint16_t Cm = (Encoding & 0x78) >> 3;
2868 const uint16_t Cn = (Encoding & 0x780) >> 7;
2869 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2870
2871 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2872
2873 Operands.push_back(
2874 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2875 Operands.push_back(
2876 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2877 Operands.push_back(
2878 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2879 Expr = MCConstantExpr::create(Op2, getContext());
2880 Operands.push_back(
2881 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2882}
2883
2884/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2885/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2886bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2887 OperandVector &Operands) {
2888 if (Name.find('.') != StringRef::npos)
2889 return TokError("invalid operand");
2890
2891 Mnemonic = Name;
2892 Operands.push_back(
2893 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2894
2895 MCAsmParser &Parser = getParser();
2896 const AsmToken &Tok = Parser.getTok();
2897 StringRef Op = Tok.getString();
2898 SMLoc S = Tok.getLoc();
2899
2900 if (Mnemonic == "ic") {
2901 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2902 if (!IC)
2903 return TokError("invalid operand for IC instruction");
2904 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2905 std::string Str("IC " + std::string(IC->Name) + " requires ");
2906 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2907 return TokError(Str.c_str());
2908 }
2909 createSysAlias(IC->Encoding, Operands, S);
2910 } else if (Mnemonic == "dc") {
2911 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2912 if (!DC)
2913 return TokError("invalid operand for DC instruction");
2914 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2915 std::string Str("DC " + std::string(DC->Name) + " requires ");
2916 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2917 return TokError(Str.c_str());
2918 }
2919 createSysAlias(DC->Encoding, Operands, S);
2920 } else if (Mnemonic == "at") {
2921 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2922 if (!AT)
2923 return TokError("invalid operand for AT instruction");
2924 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2925 std::string Str("AT " + std::string(AT->Name) + " requires ");
2926 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2927 return TokError(Str.c_str());
2928 }
2929 createSysAlias(AT->Encoding, Operands, S);
2930 } else if (Mnemonic == "tlbi") {
2931 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2932 if (!TLBI)
2933 return TokError("invalid operand for TLBI instruction");
2934 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2935 std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2936 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2937 return TokError(Str.c_str());
2938 }
2939 createSysAlias(TLBI->Encoding, Operands, S);
2940 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
2941 const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
2942 if (!PRCTX)
2943 return TokError("invalid operand for prediction restriction instruction");
2944 else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
2945 std::string Str(
2946 Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
2947 setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
2948 return TokError(Str.c_str());
2949 }
2950 uint16_t PRCTX_Op2 =
2951 Mnemonic == "cfp" ? 4 :
2952 Mnemonic == "dvp" ? 5 :
2953 Mnemonic == "cpp" ? 7 :
2954 0;
2955 assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction")((PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction"
) ? static_cast<void> (0) : __assert_fail ("PRCTX_Op2 && \"Invalid mnemonic for prediction restriction instruction\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2955, __PRETTY_FUNCTION__))
;
2956 createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
2957 }
2958
2959 Parser.Lex(); // Eat operand.
2960
2961 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2962 bool HasRegister = false;
2963
2964 // Check for the optional register operand.
2965 if (parseOptionalToken(AsmToken::Comma)) {
2966 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2967 return TokError("expected register operand");
2968 HasRegister = true;
2969 }
2970
2971 if (ExpectRegister && !HasRegister)
2972 return TokError("specified " + Mnemonic + " op requires a register");
2973 else if (!ExpectRegister && HasRegister)
2974 return TokError("specified " + Mnemonic + " op does not use a register");
2975
2976 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2977 return true;
2978
2979 return false;
2980}
2981
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DSB/DMB/ISB/TSB): either a "#imm" literal in [0, 15] or a named option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // TSB accepts only the named 'csync' option, never a bare immediate.
  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // Barrier immediates are a 4-bit field.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical option name (if this encoding has one) so the
    // operand round-trips when printed.
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // A DB name takes priority; otherwise this must be the TSB option.
  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3038
/// tryParseSysReg - Parse a system-register operand (for MRS/MSR). The same
/// token is also looked up in the PSTATE table so the matcher can use this
/// operand for the MSR (pstate-field, #imm) forms.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Per-direction encodings; -1 marks the register as unusable in that
  // direction (e.g. a write-only register named in an MRS).
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Unknown (or feature-gated) name: accept the generic
    // S<op0>_<op1>_<Cn>_<Cm>_<op2> spelling; parseGenericRegister yields -1
    // when that fails too.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  // Validity for the chosen instruction form is decided later by the matcher;
  // here we record every interpretation of the name.
  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
3067
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns false on success (operands pushed onto \p Operands), true when the
/// current token is not a Neon vector register or parsing failed.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // The (possibly empty) ".<layout>" suffix must describe a valid Neon kind.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // A lane index ("[n]") may follow; only a hard parse failure is an error,
  // an absent index is fine.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3100
3101OperandMatchResultTy
3102AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3103 SMLoc SIdx = getLoc();
3104 if (parseOptionalToken(AsmToken::LBrac)) {
3105 const MCExpr *ImmVal;
3106 if (getParser().parseExpression(ImmVal))
3107 return MatchOperand_NoMatch;
3108 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3109 if (!MCE) {
3110 TokError("immediate value expected for vector index");
3111 return MatchOperand_ParseFail;;
3112 }
3113
3114 SMLoc E = getLoc();
3115
3116 if (parseToken(AsmToken::RBrac, "']' expected"))
3117 return MatchOperand_ParseFail;;
3118
3119 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3120 E, getContext()));
3121 return MatchOperand_Success;
3122 }
3123
3124 return MatchOperand_NoMatch;
3125}
3126
3127// tryParseVectorRegister - Try to parse a vector register name with
3128// optional kind specifier. If it is a register specifier, eat the token
3129// and return it.
3130OperandMatchResultTy
3131AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3132 RegKind MatchKind) {
3133 MCAsmParser &Parser = getParser();
3134 const AsmToken &Tok = Parser.getTok();
3135
3136 if (Tok.isNot(AsmToken::Identifier))
3137 return MatchOperand_NoMatch;
3138
3139 StringRef Name = Tok.getString();
3140 // If there is a kind specifier, it's separated from the register name by
3141 // a '.'.
3142 size_t Start = 0, Next = Name.find('.');
3143 StringRef Head = Name.slice(Start, Next);
3144 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3145
3146 if (RegNum) {
3147 if (Next != StringRef::npos) {
3148 Kind = Name.slice(Next, StringRef::npos);
3149 if (!isValidVectorKind(Kind, MatchKind)) {
3150 TokError("invalid vector kind qualifier");
3151 return MatchOperand_ParseFail;
3152 }
3153 }
3154 Parser.Lex(); // Eat the register token.
3155
3156 Reg = RegNum;
3157 return MatchOperand_Success;
3158 }
3159
3160 return MatchOperand_NoMatch;
3161}
3162
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by a "/z" (zeroing) or "/m" (merging) qualifier.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  // An empty Kind (no suffix) is valid; an unrecognized suffix is a
  // non-match so another operand form can be tried.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
    AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3215
3216/// parseRegister - Parse a register operand.
3217bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3218 // Try for a Neon vector register.
3219 if (!tryParseNeonVectorRegister(Operands))
3220 return false;
3221
3222 // Otherwise try for a scalar register.
3223 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3224 return false;
3225
3226 return true;
3227}
3228
3229bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3230 MCAsmParser &Parser = getParser();
3231 bool HasELFModifier = false;
3232 AArch64MCExpr::VariantKind RefKind;
3233
3234 if (parseOptionalToken(AsmToken::Colon)) {
4
Assuming the condition is true
5
Taking true branch
3235 HasELFModifier = true;
3236
3237 if (Parser.getTok().isNot(AsmToken::Identifier))
6
Taking true branch
3238 return TokError("expect relocation specifier in operand after ':'");
7
Returning without writing to 'ImmVal'
3239
3240 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3241 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3242 .Case("lo12", AArch64MCExpr::VK_LO12)
3243 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3244 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3245 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3246 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3247 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3248 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3249 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3250 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3251 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3252 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3253 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3254 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3255 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3256 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3257 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3258 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3259 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3260 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3261 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3262 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3263 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3264 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3265 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3266 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3267 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3268 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3269 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3270 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3271 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3272 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3273 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3274 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3275 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3276 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3277 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3278 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3279 .Default(AArch64MCExpr::VK_INVALID);
3280
3281 if (RefKind == AArch64MCExpr::VK_INVALID)
3282 return TokError("expect relocation specifier in operand after ':'");
3283
3284 Parser.Lex(); // Eat identifier
3285
3286 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3287 return true;
3288 }
3289
3290 if (getParser().parseExpression(ImmVal))
3291 return true;
3292
3293 if (HasELFModifier)
3294 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3295
3296 return false;
3297}
3298
3299template <RegKind VectorKind>
3300OperandMatchResultTy
3301AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3302 bool ExpectMatch) {
3303 MCAsmParser &Parser = getParser();
3304 if (!Parser.getTok().is(AsmToken::LCurly))
3305 return MatchOperand_NoMatch;
3306
3307 // Wrapper around parse function
3308 auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3309 bool NoMatchIsError) {
3310 auto RegTok = Parser.getTok();
3311 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3312 if (ParseRes == MatchOperand_Success) {
3313 if (parseVectorKind(Kind, VectorKind))
3314 return ParseRes;
3315 llvm_unreachable("Expected a valid vector kind")::llvm::llvm_unreachable_internal("Expected a valid vector kind"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3315)
;
3316 }
3317
3318 if (RegTok.isNot(AsmToken::Identifier) ||
3319 ParseRes == MatchOperand_ParseFail ||
3320 (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3321 Error(Loc, "vector register expected");
3322 return MatchOperand_ParseFail;
3323 }
3324
3325 return MatchOperand_NoMatch;
3326 };
3327
3328 SMLoc S = getLoc();
3329 auto LCurly = Parser.getTok();
3330 Parser.Lex(); // Eat left bracket token.
3331
3332 StringRef Kind;
3333 unsigned FirstReg;
3334 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3335
3336 // Put back the original left bracket if there was no match, so that
3337 // different types of list-operands can be matched (e.g. SVE, Neon).
3338 if (ParseRes == MatchOperand_NoMatch)
3339 Parser.getLexer().UnLex(LCurly);
3340
3341 if (ParseRes != MatchOperand_Success)
3342 return ParseRes;
3343
3344 int64_t PrevReg = FirstReg;
3345 unsigned Count = 1;
3346
3347 if (parseOptionalToken(AsmToken::Minus)) {
3348 SMLoc Loc = getLoc();
3349 StringRef NextKind;
3350
3351 unsigned Reg;
3352 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3353 if (ParseRes != MatchOperand_Success)
3354 return ParseRes;
3355
3356 // Any Kind suffices must match on all regs in the list.
3357 if (Kind != NextKind) {
3358 Error(Loc, "mismatched register size suffix");
3359 return MatchOperand_ParseFail;
3360 }
3361
3362 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3363
3364 if (Space == 0 || Space > 3) {
3365 Error(Loc, "invalid number of vectors");
3366 return MatchOperand_ParseFail;
3367 }
3368
3369 Count += Space;
3370 }
3371 else {
3372 while (parseOptionalToken(AsmToken::Comma)) {
3373 SMLoc Loc = getLoc();
3374 StringRef NextKind;
3375 unsigned Reg;
3376 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3377 if (ParseRes != MatchOperand_Success)
3378 return ParseRes;
3379
3380 // Any Kind suffices must match on all regs in the list.
3381 if (Kind != NextKind) {
3382 Error(Loc, "mismatched register size suffix");
3383 return MatchOperand_ParseFail;
3384 }
3385
3386 // Registers must be incremental (with wraparound at 31)
3387 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3388 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3389 Error(Loc, "registers must be sequential");
3390 return MatchOperand_ParseFail;
3391 }
3392
3393 PrevReg = Reg;
3394 ++Count;
3395 }
3396 }
3397
3398 if (parseToken(AsmToken::RCurly, "'}' expected"))
3399 return MatchOperand_ParseFail;
3400
3401 if (Count > 4) {
3402 Error(S, "invalid number of vectors");
3403 return MatchOperand_ParseFail;
3404 }
3405
3406 unsigned NumElements = 0;
3407 unsigned ElementWidth = 0;
3408 if (!Kind.empty()) {
3409 if (const auto &VK = parseVectorKind(Kind, VectorKind))
3410 std::tie(NumElements, ElementWidth) = *VK;
3411 }
3412
3413 Operands.push_back(AArch64Operand::CreateVectorList(
3414 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3415 getContext()));
3416
3417 return MatchOperand_Success;
3418}
3419
3420/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3421bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3422 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3423 if (ParseRes != MatchOperand_Success)
3424 return true;
3425
3426 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3427}
3428
/// tryParseGPR64sp0Operand - Parse "Xn{, #0}": a 64-bit scalar register,
/// optionally followed by ", #0" (some system instructions allow only a
/// zero index after the register).
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No comma: just the bare register.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Optional '#' before the immediate.
  parseOptionalToken(AsmToken::Hash);

  if (getParser().getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  // Only the constant 0 is accepted after the comma.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
3462
/// tryParseGPROperand - Parse a scalar (GPR) register operand, optionally
/// followed by a shift/extend modifier when ParseShiftExtend is set. EqTy
/// records how this register must compare against a tied register operand
/// (exact match, or matching W/X sub/super-register).
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into a single combined register operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3497
3498bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3499 MCAsmParser &Parser = getParser();
3500
3501 // Some SVE instructions have a decoration after the immediate, i.e.
3502 // "mul vl". We parse them here and add tokens, which must be present in the
3503 // asm string in the tablegen instruction.
3504 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3505 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3506 if (!Parser.getTok().getString().equals_lower("mul") ||
3507 !(NextIsVL || NextIsHash))
3508 return true;
3509
3510 Operands.push_back(
3511 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3512 Parser.Lex(); // Eat the "mul"
3513
3514 if (NextIsVL) {
3515 Operands.push_back(
3516 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3517 Parser.Lex(); // Eat the "vl"
3518 return false;
3519 }
3520
3521 if (NextIsHash) {
3522 Parser.Lex(); // Eat the #
3523 SMLoc S = getLoc();
3524
3525 // Parse immediate operand.
3526 const MCExpr *ImmVal;
3527 if (!Parser.parseExpression(ImmVal))
3528 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3529 Operands.push_back(AArch64Operand::CreateImm(
3530 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3531 getContext()));
3532 return MatchOperand_Success;
3533 }
3534 }
3535
3536 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3537}
3538
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as an expression immediate, possibly with a
    // ":modifier:" relocation specifier.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    // Start of a memory-style operand; emit the '[' as a literal token.
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as two separate raw tokens, matching the asm
      // string in the tablegen instruction definitions.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr r0, =val" pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    // NOTE(review): S is still default-constructed on this path; only E is
    // set before the CreateImm calls below — confirm this is intentional.
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize the constant into a 16-bit payload plus an LSL amount.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3710
3711bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3712 const MCParsedAsmOperand &Op2) const {
3713 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3714 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3715 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3716 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3717 return MCTargetAsmParser::regsEqual(Op1, Op2);
3718
3719 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3720, __PRETTY_FUNCTION__))
3720 "Testing equality of non-scalar registers not supported")((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3720, __PRETTY_FUNCTION__))
;
3721
3722 // Check if a registers match their sub/super register classes.
3723 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3724 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3725 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3726 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3727 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3728 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3729 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3730 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3731
3732 return false;
3733}
3734
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Splits the mnemonic on '.' into a head token plus suffix tokens, handles
/// the legacy conditional-branch spellings and the SYS aliases, then parses
/// the comma-separated operand list (with special handling for ']' and '!'
/// which are notional operands not separated by commas).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize pre-UAL conditional branch spellings ("beq") to the
  // architectural dotted form ("b.eq") before splitting on '.'.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic: the suffix after the first
  // '.' is parsed into a dedicated CondCode operand rather than a token.
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point the suffix location at the condition-code text itself so
    // diagnostics land on the right column.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. The second argument tells
      // parseOperand whether operand N is the condition-code position for
      // this mnemonic; the third whether the code must be inverted.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3870
3871static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3872 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31
)) ? static_cast<void> (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3872, __PRETTY_FUNCTION__))
;
3873 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3874 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3875 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3876 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3877 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3878 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3879}
3880
3881// FIXME: This entire function is a giant hack to provide us with decent
3882// operand range validation/diagnostics until TableGen/MC can be extended
3883// to support autogeneration of this kind of validation.
3884bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3885 SmallVectorImpl<SMLoc> &Loc) {
3886 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3887 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3888
3889 // A prefix only applies to the instruction following it. Here we extract
3890 // prefix information for the next instruction before validating the current
3891 // one so that in the case of failure we don't erronously continue using the
3892 // current prefix.
3893 PrefixInfo Prefix = NextPrefix;
3894 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3895
3896 // Before validating the instruction in isolation we run through the rules
3897 // applicable when it follows a prefix instruction.
3898 // NOTE: brk & hlt can be prefixed but require no additional validation.
3899 if (Prefix.isActive() &&
3900 (Inst.getOpcode() != AArch64::BRK) &&
3901 (Inst.getOpcode() != AArch64::HLT)) {
3902
3903 // Prefixed intructions must have a destructive operand.
3904 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3905 AArch64::NotDestructive)
3906 return Error(IDLoc, "instruction is unpredictable when following a"
3907 " movprfx, suggest replacing movprfx with mov");
3908
3909 // Destination operands must match.
3910 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3911 return Error(Loc[0], "instruction is unpredictable when following a"
3912 " movprfx writing to a different destination");
3913
3914 // Destination operand must not be used in any other location.
3915 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3916 if (Inst.getOperand(i).isReg() &&
3917 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3918 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3919 return Error(Loc[0], "instruction is unpredictable when following a"
3920 " movprfx and destination also used as non-destructive"
3921 " source");
3922 }
3923
3924 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3925 if (Prefix.isPredicated()) {
3926 int PgIdx = -1;
3927
3928 // Find the instructions general predicate.
3929 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3930 if (Inst.getOperand(i).isReg() &&
3931 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3932 PgIdx = i;
3933 break;
3934 }
3935
3936 // Instruction must be predicated if the movprfx is predicated.
3937 if (PgIdx == -1 ||
3938 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3939 return Error(IDLoc, "instruction is unpredictable when following a"
3940 " predicated movprfx, suggest using unpredicated movprfx");
3941
3942 // Instruction must use same general predicate as the movprfx.
3943 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3944 return Error(IDLoc, "instruction is unpredictable when following a"
3945 " predicated movprfx using a different general predicate");
3946
3947 // Instruction element type must match the movprfx.
3948 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3949 return Error(IDLoc, "instruction is unpredictable when following a"
3950 " predicated movprfx with a different element size");
3951 }
3952 }
3953
3954 // Check for indexed addressing modes w/ the base register being the
3955 // same as a destination/source register or pair load where
3956 // the Rt == Rt2. All of those are undefined behaviour.
3957 switch (Inst.getOpcode()) {
3958 case AArch64::LDPSWpre:
3959 case AArch64::LDPWpost:
3960 case AArch64::LDPWpre:
3961 case AArch64::LDPXpost:
3962 case AArch64::LDPXpre: {
3963 unsigned Rt = Inst.getOperand(1).getReg();
3964 unsigned Rt2 = Inst.getOperand(2).getReg();
3965 unsigned Rn = Inst.getOperand(3).getReg();
3966 if (RI->isSubRegisterEq(Rn, Rt))
3967 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3968 "is also a destination");
3969 if (RI->isSubRegisterEq(Rn, Rt2))
3970 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3971 "is also a destination");
3972 LLVM_FALLTHROUGH[[clang::fallthrough]];
3973 }
3974 case AArch64::LDPDi:
3975 case AArch64::LDPQi:
3976 case AArch64::LDPSi:
3977 case AArch64::LDPSWi:
3978 case AArch64::LDPWi:
3979 case AArch64::LDPXi: {
3980 unsigned Rt = Inst.getOperand(0).getReg();
3981 unsigned Rt2 = Inst.getOperand(1).getReg();
3982 if (Rt == Rt2)
3983 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3984 break;
3985 }
3986 case AArch64::LDPDpost:
3987 case AArch64::LDPDpre:
3988 case AArch64::LDPQpost:
3989 case AArch64::LDPQpre:
3990 case AArch64::LDPSpost:
3991 case AArch64::LDPSpre:
3992 case AArch64::LDPSWpost: {
3993 unsigned Rt = Inst.getOperand(1).getReg();
3994 unsigned Rt2 = Inst.getOperand(2).getReg();
3995 if (Rt == Rt2)
3996 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3997 break;
3998 }
3999 case AArch64::STPDpost:
4000 case AArch64::STPDpre:
4001 case AArch64::STPQpost:
4002 case AArch64::STPQpre:
4003 case AArch64::STPSpost:
4004 case AArch64::STPSpre:
4005 case AArch64::STPWpost:
4006 case AArch64::STPWpre:
4007 case AArch64::STPXpost:
4008 case AArch64::STPXpre: {
4009 unsigned Rt = Inst.getOperand(1).getReg();
4010 unsigned Rt2 = Inst.getOperand(2).getReg();
4011 unsigned Rn = Inst.getOperand(3).getReg();
4012 if (RI->isSubRegisterEq(Rn, Rt))
4013 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4014 "is also a source");
4015 if (RI->isSubRegisterEq(Rn, Rt2))
4016 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4017 "is also a source");
4018 break;
4019 }
4020 case AArch64::LDRBBpre:
4021 case AArch64::LDRBpre:
4022 case AArch64::LDRHHpre:
4023 case AArch64::LDRHpre:
4024 case AArch64::LDRSBWpre:
4025 case AArch64::LDRSBXpre:
4026 case AArch64::LDRSHWpre:
4027 case AArch64::LDRSHXpre:
4028 case AArch64::LDRSWpre:
4029 case AArch64::LDRWpre:
4030 case AArch64::LDRXpre:
4031 case AArch64::LDRBBpost:
4032 case AArch64::LDRBpost:
4033 case AArch64::LDRHHpost:
4034 case AArch64::LDRHpost:
4035 case AArch64::LDRSBWpost:
4036 case AArch64::LDRSBXpost:
4037 case AArch64::LDRSHWpost:
4038 case AArch64::LDRSHXpost:
4039 case AArch64::LDRSWpost:
4040 case AArch64::LDRWpost:
4041 case AArch64::LDRXpost: {
4042 unsigned Rt = Inst.getOperand(1).getReg();
4043 unsigned Rn = Inst.getOperand(2).getReg();
4044 if (RI->isSubRegisterEq(Rn, Rt))
4045 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4046 "is also a source");
4047 break;
4048 }
4049 case AArch64::STRBBpost:
4050 case AArch64::STRBpost:
4051 case AArch64::STRHHpost:
4052 case AArch64::STRHpost:
4053 case AArch64::STRWpost:
4054 case AArch64::STRXpost:
4055 case AArch64::STRBBpre:
4056 case AArch64::STRBpre:
4057 case AArch64::STRHHpre:
4058 case AArch64::STRHpre:
4059 case AArch64::STRWpre:
4060 case AArch64::STRXpre: {
4061 unsigned Rt = Inst.getOperand(1).getReg();
4062 unsigned Rn = Inst.getOperand(2).getReg();
4063 if (RI->isSubRegisterEq(Rn, Rt))
4064 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4065 "is also a source");
4066 break;
4067 }
4068 case AArch64::STXRB:
4069 case AArch64::STXRH:
4070 case AArch64::STXRW:
4071 case AArch64::STXRX:
4072 case AArch64::STLXRB:
4073 case AArch64::STLXRH:
4074 case AArch64::STLXRW:
4075 case AArch64::STLXRX: {
4076 unsigned Rs = Inst.getOperand(0).getReg();
4077 unsigned Rt = Inst.getOperand(1).getReg();
4078 unsigned Rn = Inst.getOperand(2).getReg();
4079 if (RI->isSubRegisterEq(Rt, Rs) ||
4080 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4081 return Error(Loc[0],
4082 "unpredictable STXR instruction, status is also a source");
4083 break;
4084 }
4085 case AArch64::STXPW:
4086 case AArch64::STXPX:
4087 case AArch64::STLXPW:
4088 case AArch64::STLXPX: {
4089 unsigned Rs = Inst.getOperand(0).getReg();
4090 unsigned Rt1 = Inst.getOperand(1).getReg();
4091 unsigned Rt2 = Inst.getOperand(2).getReg();
4092 unsigned Rn = Inst.getOperand(3).getReg();
4093 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4094 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4095 return Error(Loc[0],
4096 "unpredictable STXP instruction, status is also a source");
4097 break;
4098 }
4099 }
4100
4101
4102 // Now check immediate ranges. Separate from the above as there is overlap
4103 // in the instructions being checked and this keeps the nested conditionals
4104 // to a minimum.
4105 switch (Inst.getOpcode()) {
4106 case AArch64::ADDSWri:
4107 case AArch64::ADDSXri:
4108 case AArch64::ADDWri:
4109 case AArch64::ADDXri:
4110 case AArch64::SUBSWri:
4111 case AArch64::SUBSXri:
4112 case AArch64::SUBWri:
4113 case AArch64::SUBXri: {
4114 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4115 // some slight duplication here.
4116 if (Inst.getOperand(2).isExpr()) {
4117 const MCExpr *Expr = Inst.getOperand(2).getExpr();
4118 AArch64MCExpr::VariantKind ELFRefKind;
4119 MCSymbolRefExpr::VariantKind DarwinRefKind;
4120 int64_t Addend;
4121 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4122
4123 // Only allow these with ADDXri.
4124 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4125 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4126 Inst.getOpcode() == AArch64::ADDXri)
4127 return false;
4128
4129 // Only allow these with ADDXri/ADDWri
4130 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4131 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4132 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4133 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4134 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4135 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4136 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4137 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4138 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4139 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4140 (Inst.getOpcode() == AArch64::ADDXri ||
4141 Inst.getOpcode() == AArch64::ADDWri))
4142 return false;
4143
4144 // Don't allow symbol refs in the immediate field otherwise
4145 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4146 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4147 // 'cmp w0, 'borked')
4148 return Error(Loc.back(), "invalid immediate expression");
4149 }
4150 // We don't validate more complex expressions here
4151 }
4152 return false;
4153 }
4154 default:
4155 return false;
4156 }
4157}
4158
4159static std::string AArch64MnemonicSpellCheck(StringRef S,
4160 const FeatureBitset &FBS,
4161 unsigned VariantID = 0);
4162
/// Emit a human-readable diagnostic at \p Loc for the given match-failure
/// code produced by the TableGen'erated matcher. Always returns true so
/// callers can use it directly as the failure path of MatchAndEmitInstruction.
/// \p ErrorInfo is only consulted for Match_InvalidTiedOperand, where it
/// indexes the offending operand in \p Operands.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // The message depends on which form of the destination register the tied
    // operand was required to take.
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  // Shifted/extended register operand diagnostics.
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-operand index range diagnostics (signed, then unsigned).
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  // Memory-operand extend diagnostics.
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate range diagnostics.
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // SVE immediate diagnostics.
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  // Vector lane index diagnostics.
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Offer a spell-checked suggestion based on the mnemonics available for
    // the current feature set.
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  // SVE scalar-plus-scalar addressing diagnostics.
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  // SVE scalar-plus-vector / vector-plus-vector addressing diagnostics.
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  // SVE vector/predicate register diagnostics.
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
  case Match_InvalidSVEPredicate3bBReg:
  case Match_InvalidSVEPredicate3bHReg:
  case Match_InvalidSVEPredicate3bSReg:
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "restricted predicate has range [0, 7].");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
4478
4479static const char *getSubtargetFeatureName(uint64_t Val);
4480
4481bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4482 OperandVector &Operands,
4483 MCStreamer &Out,
4484 uint64_t &ErrorInfo,
4485 bool MatchingInlineAsm) {
4486 assert(!Operands.empty() && "Unexpect empty operand list!")((!Operands.empty() && "Unexpect empty operand list!"
) ? static_cast<void> (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4486, __PRETTY_FUNCTION__))
;
4487 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4488 assert(Op.isToken() && "Leading operand should always be a mnemonic!")((Op.isToken() && "Leading operand should always be a mnemonic!"
) ? static_cast<void> (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4488, __PRETTY_FUNCTION__))
;
4489
4490 StringRef Tok = Op.getToken();
4491 unsigned NumOperands = Operands.size();
4492
4493 if (NumOperands == 4 && Tok == "lsl") {
4494 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4495 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4496 if (Op2.isScalarReg() && Op3.isImm()) {
4497 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4498 if (Op3CE) {
4499 uint64_t Op3Val = Op3CE->getValue();
4500 uint64_t NewOp3Val = 0;
4501 uint64_t NewOp4Val = 0;
4502 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4503 Op2.getReg())) {
4504 NewOp3Val = (32 - Op3Val) & 0x1f;
4505 NewOp4Val = 31 - Op3Val;
4506 } else {
4507 NewOp3Val = (64 - Op3Val) & 0x3f;
4508 NewOp4Val = 63 - Op3Val;
4509 }
4510
4511 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4512 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4513
4514 Operands[0] = AArch64Operand::CreateToken(
4515 "ubfm", false, Op.getStartLoc(), getContext());
4516 Operands.push_back(AArch64Operand::CreateImm(
4517 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4518 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4519 Op3.getEndLoc(), getContext());
4520 }
4521 }
4522 } else if (NumOperands == 4 && Tok == "bfc") {
4523 // FIXME: Horrible hack to handle BFC->BFM alias.
4524 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4525 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4526 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4527
4528 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4529 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4530 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4531
4532 if (LSBCE && WidthCE) {
4533 uint64_t LSB = LSBCE->getValue();
4534 uint64_t Width = WidthCE->getValue();
4535
4536 uint64_t RegWidth = 0;
4537 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4538 Op1.getReg()))
4539 RegWidth = 64;
4540 else
4541 RegWidth = 32;
4542
4543 if (LSB >= RegWidth)
4544 return Error(LSBOp.getStartLoc(),
4545 "expected integer in range [0, 31]");
4546 if (Width < 1 || Width > RegWidth)
4547 return Error(WidthOp.getStartLoc(),
4548 "expected integer in range [1, 32]");
4549
4550 uint64_t ImmR = 0;
4551 if (RegWidth == 32)
4552 ImmR = (32 - LSB) & 0x1f;
4553 else
4554 ImmR = (64 - LSB) & 0x3f;
4555
4556 uint64_t ImmS = Width - 1;
4557
4558 if (ImmR != 0 && ImmS >= ImmR)
4559 return Error(WidthOp.getStartLoc(),
4560 "requested insert overflows register");
4561
4562 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4563 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4564 Operands[0] = AArch64Operand::CreateToken(
4565 "bfm", false, Op.getStartLoc(), getContext());
4566 Operands[2] = AArch64Operand::CreateReg(
4567 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4568 SMLoc(), SMLoc(), getContext());
4569 Operands[3] = AArch64Operand::CreateImm(
4570 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4571 Operands.emplace_back(
4572 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4573 WidthOp.getEndLoc(), getContext()));
4574 }
4575 }
4576 } else if (NumOperands == 5) {
4577 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4578 // UBFIZ -> UBFM aliases.
4579 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4580 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4581 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4582 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4583
4584 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4585 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4586 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4587
4588 if (Op3CE && Op4CE) {
4589 uint64_t Op3Val = Op3CE->getValue();
4590 uint64_t Op4Val = Op4CE->getValue();
4591
4592 uint64_t RegWidth = 0;
4593 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4594 Op1.getReg()))
4595 RegWidth = 64;
4596 else
4597 RegWidth = 32;
4598
4599 if (Op3Val >= RegWidth)
4600 return Error(Op3.getStartLoc(),
4601 "expected integer in range [0, 31]");
4602 if (Op4Val < 1 || Op4Val > RegWidth)
4603 return Error(Op4.getStartLoc(),
4604 "expected integer in range [1, 32]");
4605
4606 uint64_t NewOp3Val = 0;
4607 if (RegWidth == 32)
4608 NewOp3Val = (32 - Op3Val) & 0x1f;
4609 else
4610 NewOp3Val = (64 - Op3Val) & 0x3f;
4611
4612 uint64_t NewOp4Val = Op4Val - 1;
4613
4614 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4615 return Error(Op4.getStartLoc(),
4616 "requested insert overflows register");
4617
4618 const MCExpr *NewOp3 =
4619 MCConstantExpr::create(NewOp3Val, getContext());
4620 const MCExpr *NewOp4 =
4621 MCConstantExpr::create(NewOp4Val, getContext());
4622 Operands[3] = AArch64Operand::CreateImm(
4623 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4624 Operands[4] = AArch64Operand::CreateImm(
4625 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4626 if (Tok == "bfi")
4627 Operands[0] = AArch64Operand::CreateToken(
4628 "bfm", false, Op.getStartLoc(), getContext());
4629 else if (Tok == "sbfiz")
4630 Operands[0] = AArch64Operand::CreateToken(
4631 "sbfm", false, Op.getStartLoc(), getContext());
4632 else if (Tok == "ubfiz")
4633 Operands[0] = AArch64Operand::CreateToken(
4634 "ubfm", false, Op.getStartLoc(), getContext());
4635 else
4636 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4636)
;
4637 }
4638 }
4639
4640 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4641 // UBFX -> UBFM aliases.
4642 } else if (NumOperands == 5 &&
4643 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4644 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4645 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4646 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4647
4648 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4649 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4650 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4651
4652 if (Op3CE && Op4CE) {
4653 uint64_t Op3Val = Op3CE->getValue();
4654 uint64_t Op4Val = Op4CE->getValue();
4655
4656 uint64_t RegWidth = 0;
4657 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4658 Op1.getReg()))
4659 RegWidth = 64;
4660 else
4661 RegWidth = 32;
4662
4663 if (Op3Val >= RegWidth)
4664 return Error(Op3.getStartLoc(),
4665 "expected integer in range [0, 31]");
4666 if (Op4Val < 1 || Op4Val > RegWidth)
4667 return Error(Op4.getStartLoc(),
4668 "expected integer in range [1, 32]");
4669
4670 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4671
4672 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4673 return Error(Op4.getStartLoc(),
4674 "requested extract overflows register");
4675
4676 const MCExpr *NewOp4 =
4677 MCConstantExpr::create(NewOp4Val, getContext());
4678 Operands[4] = AArch64Operand::CreateImm(
4679 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4680 if (Tok == "bfxil")
4681 Operands[0] = AArch64Operand::CreateToken(
4682 "bfm", false, Op.getStartLoc(), getContext());
4683 else if (Tok == "sbfx")
4684 Operands[0] = AArch64Operand::CreateToken(
4685 "sbfm", false, Op.getStartLoc(), getContext());
4686 else if (Tok == "ubfx")
4687 Operands[0] = AArch64Operand::CreateToken(
4688 "ubfm", false, Op.getStartLoc(), getContext());
4689 else
4690 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4690)
;
4691 }
4692 }
4693 }
4694 }
4695
4696 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4697 // instruction for FP registers correctly in some rare circumstances. Convert
4698 // it to a safe instruction and warn (because silently changing someone's
4699 // assembly is rude).
4700 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4701 NumOperands == 4 && Tok == "movi") {
4702 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4703 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4704 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4705 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4706 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4707 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4708 if (Suffix.lower() == ".2d" &&
4709 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4710 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4711 " correctly on this CPU, converting to equivalent movi.16b");
4712 // Switch the suffix to .16b.
4713 unsigned Idx = Op1.isToken() ? 1 : 2;
4714 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4715 getContext());
4716 }
4717 }
4718 }
4719
4720 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4721 // InstAlias can't quite handle this since the reg classes aren't
4722 // subclasses.
4723 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4724 // The source register can be Wn here, but the matcher expects a
4725 // GPR64. Twiddle it here if necessary.
4726 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4727 if (Op.isScalarReg()) {
4728 unsigned Reg = getXRegFromWReg(Op.getReg());
4729 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4730 Op.getStartLoc(), Op.getEndLoc(),
4731 getContext());
4732 }
4733 }
4734 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4735 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4736 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4737 if (Op.isScalarReg() &&
4738 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4739 Op.getReg())) {
4740 // The source register can be Wn here, but the matcher expects a
4741 // GPR64. Twiddle it here if necessary.
4742 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4743 if (Op.isScalarReg()) {
4744 unsigned Reg = getXRegFromWReg(Op.getReg());
4745 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4746 Op.getStartLoc(),
4747 Op.getEndLoc(), getContext());
4748 }
4749 }
4750 }
4751 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4752 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4753 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4754 if (Op.isScalarReg() &&
4755 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4756 Op.getReg())) {
4757 // The source register can be Wn here, but the matcher expects a
4758 // GPR32. Twiddle it here if necessary.
4759 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4760 if (Op.isScalarReg()) {
4761 unsigned Reg = getWRegFromXReg(Op.getReg());
4762 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4763 Op.getStartLoc(),
4764 Op.getEndLoc(), getContext());
4765 }
4766 }
4767 }
4768
4769 MCInst Inst;
4770 FeatureBitset MissingFeatures;
4771 // First try to match against the secondary set of tables containing the
4772 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4773 unsigned MatchResult =
4774 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4775 MatchingInlineAsm, 1);
4776
4777 // If that fails, try against the alternate table containing long-form NEON:
4778 // "fadd v0.2s, v1.2s, v2.2s"
4779 if (MatchResult != Match_Success) {
4780 // But first, save the short-form match result: we can use it in case the
4781 // long-form match also fails.
4782 auto ShortFormNEONErrorInfo = ErrorInfo;
4783 auto ShortFormNEONMatchResult = MatchResult;
4784 auto ShortFormNEONMissingFeatures = MissingFeatures;
4785
4786 MatchResult =
4787 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4788 MatchingInlineAsm, 0);
4789
4790 // Now, both matches failed, and the long-form match failed on the mnemonic
4791 // suffix token operand. The short-form match failure is probably more
4792 // relevant: use it instead.
4793 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4794 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4795 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4796 MatchResult = ShortFormNEONMatchResult;
4797 ErrorInfo = ShortFormNEONErrorInfo;
4798 MissingFeatures = ShortFormNEONMissingFeatures;
4799 }
4800 }
4801
4802 switch (MatchResult) {
4803 case Match_Success: {
4804 // Perform range checking and other semantic validations
4805 SmallVector<SMLoc, 8> OperandLocs;
4806 NumOperands = Operands.size();
4807 for (unsigned i = 1; i < NumOperands; ++i)
4808 OperandLocs.push_back(Operands[i]->getStartLoc());
4809 if (validateInstruction(Inst, IDLoc, OperandLocs))
4810 return true;
4811
4812 Inst.setLoc(IDLoc);
4813 Out.EmitInstruction(Inst, getSTI());
4814 return false;
4815 }
4816 case Match_MissingFeature: {
4817 assert(MissingFeatures.any() && "Unknown missing feature!")((MissingFeatures.any() && "Unknown missing feature!"
) ? static_cast<void> (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4817, __PRETTY_FUNCTION__))
;
4818 // Special case the error message for the very common case where only
4819 // a single subtarget feature is missing (neon, e.g.).
4820 std::string Msg = "instruction requires:";
4821 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4822 if (MissingFeatures[i]) {
4823 Msg += " ";
4824 Msg += getSubtargetFeatureName(i);
4825 }
4826 }
4827 return Error(IDLoc, Msg);
4828 }
4829 case Match_MnemonicFail:
4830 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4831 case Match_InvalidOperand: {
4832 SMLoc ErrorLoc = IDLoc;
4833
4834 if (ErrorInfo != ~0ULL) {
4835 if (ErrorInfo >= Operands.size())
4836 return Error(IDLoc, "too few operands for instruction",
4837 SMRange(IDLoc, getTok().getLoc()));
4838
4839 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4840 if (ErrorLoc == SMLoc())
4841 ErrorLoc = IDLoc;
4842 }
4843 // If the match failed on a suffix token operand, tweak the diagnostic
4844 // accordingly.
4845 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4846 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4847 MatchResult = Match_InvalidSuffix;
4848
4849 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4850 }
4851 case Match_InvalidTiedOperand:
4852 case Match_InvalidMemoryIndexed1:
4853 case Match_InvalidMemoryIndexed2:
4854 case Match_InvalidMemoryIndexed4:
4855 case Match_InvalidMemoryIndexed8:
4856 case Match_InvalidMemoryIndexed16:
4857 case Match_InvalidCondCode:
4858 case Match_AddSubRegExtendSmall:
4859 case Match_AddSubRegExtendLarge:
4860 case Match_AddSubSecondSource:
4861 case Match_LogicalSecondSource:
4862 case Match_AddSubRegShift32:
4863 case Match_AddSubRegShift64:
4864 case Match_InvalidMovImm32Shift:
4865 case Match_InvalidMovImm64Shift:
4866 case Match_InvalidFPImm:
4867 case Match_InvalidMemoryWExtend8:
4868 case Match_InvalidMemoryWExtend16:
4869 case Match_InvalidMemoryWExtend32:
4870 case Match_InvalidMemoryWExtend64:
4871 case Match_InvalidMemoryWExtend128:
4872 case Match_InvalidMemoryXExtend8:
4873 case Match_InvalidMemoryXExtend16:
4874 case Match_InvalidMemoryXExtend32:
4875 case Match_InvalidMemoryXExtend64:
4876 case Match_InvalidMemoryXExtend128:
4877 case Match_InvalidMemoryIndexed1SImm4:
4878 case Match_InvalidMemoryIndexed2SImm4:
4879 case Match_InvalidMemoryIndexed3SImm4:
4880 case Match_InvalidMemoryIndexed4SImm4:
4881 case Match_InvalidMemoryIndexed1SImm6:
4882 case Match_InvalidMemoryIndexed16SImm4:
4883 case Match_InvalidMemoryIndexed4SImm7:
4884 case Match_InvalidMemoryIndexed8SImm7:
4885 case Match_InvalidMemoryIndexed16SImm7:
4886 case Match_InvalidMemoryIndexed8UImm5:
4887 case Match_InvalidMemoryIndexed4UImm5:
4888 case Match_InvalidMemoryIndexed2UImm5:
4889 case Match_InvalidMemoryIndexed1UImm6:
4890 case Match_InvalidMemoryIndexed2UImm6:
4891 case Match_InvalidMemoryIndexed4UImm6:
4892 case Match_InvalidMemoryIndexed8UImm6:
4893 case Match_InvalidMemoryIndexed16UImm6:
4894 case Match_InvalidMemoryIndexedSImm6:
4895 case Match_InvalidMemoryIndexedSImm5:
4896 case Match_InvalidMemoryIndexedSImm8:
4897 case Match_InvalidMemoryIndexedSImm9:
4898 case Match_InvalidMemoryIndexed16SImm9:
4899 case Match_InvalidMemoryIndexed8SImm10:
4900 case Match_InvalidImm0_1:
4901 case Match_InvalidImm0_7:
4902 case Match_InvalidImm0_15:
4903 case Match_InvalidImm0_31:
4904 case Match_InvalidImm0_63:
4905 case Match_InvalidImm0_127:
4906 case Match_InvalidImm0_255:
4907 case Match_InvalidImm0_65535:
4908 case Match_InvalidImm1_8:
4909 case Match_InvalidImm1_16:
4910 case Match_InvalidImm1_32:
4911 case Match_InvalidImm1_64:
4912 case Match_InvalidSVEAddSubImm8:
4913 case Match_InvalidSVEAddSubImm16:
4914 case Match_InvalidSVEAddSubImm32:
4915 case Match_InvalidSVEAddSubImm64:
4916 case Match_InvalidSVECpyImm8:
4917 case Match_InvalidSVECpyImm16:
4918 case Match_InvalidSVECpyImm32:
4919 case Match_InvalidSVECpyImm64:
4920 case Match_InvalidIndexRange1_1:
4921 case Match_InvalidIndexRange0_15:
4922 case Match_InvalidIndexRange0_7:
4923 case Match_InvalidIndexRange0_3:
4924 case Match_InvalidIndexRange0_1:
4925 case Match_InvalidSVEIndexRange0_63:
4926 case Match_InvalidSVEIndexRange0_31:
4927 case Match_InvalidSVEIndexRange0_15:
4928 case Match_InvalidSVEIndexRange0_7:
4929 case Match_InvalidSVEIndexRange0_3:
4930 case Match_InvalidLabel:
4931 case Match_InvalidComplexRotationEven:
4932 case Match_InvalidComplexRotationOdd:
4933 case Match_InvalidGPR64shifted8:
4934 case Match_InvalidGPR64shifted16:
4935 case Match_InvalidGPR64shifted32:
4936 case Match_InvalidGPR64shifted64:
4937 case Match_InvalidGPR64NoXZRshifted8:
4938 case Match_InvalidGPR64NoXZRshifted16:
4939 case Match_InvalidGPR64NoXZRshifted32:
4940 case Match_InvalidGPR64NoXZRshifted64:
4941 case Match_InvalidZPR32UXTW8:
4942 case Match_InvalidZPR32UXTW16:
4943 case Match_InvalidZPR32UXTW32:
4944 case Match_InvalidZPR32UXTW64:
4945 case Match_InvalidZPR32SXTW8:
4946 case Match_InvalidZPR32SXTW16:
4947 case Match_InvalidZPR32SXTW32:
4948 case Match_InvalidZPR32SXTW64:
4949 case Match_InvalidZPR64UXTW8:
4950 case Match_InvalidZPR64SXTW8:
4951 case Match_InvalidZPR64UXTW16:
4952 case Match_InvalidZPR64SXTW16:
4953 case Match_InvalidZPR64UXTW32:
4954 case Match_InvalidZPR64SXTW32:
4955 case Match_InvalidZPR64UXTW64:
4956 case Match_InvalidZPR64SXTW64:
4957 case Match_InvalidZPR32LSL8:
4958 case Match_InvalidZPR32LSL16:
4959 case Match_InvalidZPR32LSL32:
4960 case Match_InvalidZPR32LSL64:
4961 case Match_InvalidZPR64LSL8:
4962 case Match_InvalidZPR64LSL16:
4963 case Match_InvalidZPR64LSL32:
4964 case Match_InvalidZPR64LSL64:
4965 case Match_InvalidZPR0:
4966 case Match_InvalidZPR8:
4967 case Match_InvalidZPR16:
4968 case Match_InvalidZPR32:
4969 case Match_InvalidZPR64:
4970 case Match_InvalidZPR128:
4971 case Match_InvalidZPR_3b8:
4972 case Match_InvalidZPR_3b16:
4973 case Match_InvalidZPR_3b32:
4974 case Match_InvalidZPR_4b16:
4975 case Match_InvalidZPR_4b32:
4976 case Match_InvalidZPR_4b64:
4977 case Match_InvalidSVEPredicateAnyReg:
4978 case Match_InvalidSVEPattern:
4979 case Match_InvalidSVEPredicateBReg:
4980 case Match_InvalidSVEPredicateHReg:
4981 case Match_InvalidSVEPredicateSReg:
4982 case Match_InvalidSVEPredicateDReg:
4983 case Match_InvalidSVEPredicate3bAnyReg:
4984 case Match_InvalidSVEPredicate3bBReg:
4985 case Match_InvalidSVEPredicate3bHReg:
4986 case Match_InvalidSVEPredicate3bSReg:
4987 case Match_InvalidSVEPredicate3bDReg:
4988 case Match_InvalidSVEExactFPImmOperandHalfOne:
4989 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4990 case Match_InvalidSVEExactFPImmOperandZeroOne:
4991 case Match_MSR:
4992 case Match_MRS: {
4993 if (ErrorInfo >= Operands.size())
4994 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4995 // Any time we get here, there's nothing fancy to do. Just get the
4996 // operand SMLoc and display the diagnostic.
4997 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4998 if (ErrorLoc == SMLoc())
4999 ErrorLoc = IDLoc;
5000 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5001 }
5002 }
5003
5004 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5004)
;
5005}
5006
5007/// ParseDirective parses the arm specific directives
5008bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5009 const MCObjectFileInfo::Environment Format =
5010 getContext().getObjectFileInfo()->getObjectFileType();
5011 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5012
5013 StringRef IDVal = DirectiveID.getIdentifier();
5014 SMLoc Loc = DirectiveID.getLoc();
5015 if (IDVal == ".arch")
5016 parseDirectiveArch(Loc);
5017 else if (IDVal == ".cpu")
5018 parseDirectiveCPU(Loc);
5019 else if (IDVal == ".tlsdesccall")
5020 parseDirectiveTLSDescCall(Loc);
5021 else if (IDVal == ".ltorg" || IDVal == ".pool")
5022 parseDirectiveLtorg(Loc);
5023 else if (IDVal == ".unreq")
5024 parseDirectiveUnreq(Loc);
5025 else if (IDVal == ".inst")
5026 parseDirectiveInst(Loc);
5027 else if (IDVal == ".cfi_negate_ra_state")
5028 parseDirectiveCFINegateRAState();
5029 else if (IDVal == ".cfi_b_key_frame")
5030 parseDirectiveCFIBKeyFrame();
5031 else if (IDVal == ".arch_extension")
5032 parseDirectiveArchExtension(Loc);
5033 else if (IsMachO) {
5034 if (IDVal == MCLOHDirectiveName())
5035 parseDirectiveLOH(IDVal, Loc);
5036 else
5037 return true;
5038 } else
5039 return true;
5040 return false;
5041}
5042
5043static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5044 SmallVector<StringRef, 4> &RequestedExtensions) {
5045 const bool NoCrypto =
5046 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5047 "nocrypto") != std::end(RequestedExtensions));
5048 const bool Crypto =
5049 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5050 "crypto") != std::end(RequestedExtensions));
5051
5052 if (!NoCrypto && Crypto) {
5053 switch (ArchKind) {
5054 default:
5055 // Map 'generic' (and others) to sha2 and aes, because
5056 // that was the traditional meaning of crypto.
5057 case AArch64::ArchKind::ARMV8_1A:
5058 case AArch64::ArchKind::ARMV8_2A:
5059 case AArch64::ArchKind::ARMV8_3A:
5060 RequestedExtensions.push_back("sha2");
5061 RequestedExtensions.push_back("aes");
5062 break;
5063 case AArch64::ArchKind::ARMV8_4A:
5064 case AArch64::ArchKind::ARMV8_5A:
5065 RequestedExtensions.push_back("sm4");
5066 RequestedExtensions.push_back("sha3");
5067 RequestedExtensions.push_back("sha2");
5068 RequestedExtensions.push_back("aes");
5069 break;
5070 }
5071 } else if (NoCrypto) {
5072 switch (ArchKind) {
5073 default:
5074 // Map 'generic' (and others) to sha2 and aes, because
5075 // that was the traditional meaning of crypto.
5076 case AArch64::ArchKind::ARMV8_1A:
5077 case AArch64::ArchKind::ARMV8_2A:
5078 case AArch64::ArchKind::ARMV8_3A:
5079 RequestedExtensions.push_back("nosha2");
5080 RequestedExtensions.push_back("noaes");
5081 break;
5082 case AArch64::ArchKind::ARMV8_4A:
5083 case AArch64::ArchKind::ARMV8_5A:
5084 RequestedExtensions.push_back("nosm4");
5085 RequestedExtensions.push_back("nosha3");
5086 RequestedExtensions.push_back("nosha2");
5087 RequestedExtensions.push_back("noaes");
5088 break;
5089 }
5090 }
5091}
5092
5093/// parseDirectiveArch
5094/// ::= .arch token
5095bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5096 SMLoc ArchLoc = getLoc();
5097
5098 StringRef Arch, ExtensionString;
5099 std::tie(Arch, ExtensionString) =
5100 getParser().parseStringToEndOfStatement().trim().split('+');
5101
5102 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5103 if (ID == AArch64::ArchKind::INVALID)
5104 return Error(ArchLoc, "unknown arch name");
5105
5106 if (parseToken(AsmToken::EndOfStatement))
5107 return true;
5108
5109 // Get the architecture and extension features.
5110 std::vector<StringRef> AArch64Features;
5111 AArch64::getArchFeatures(ID, AArch64Features);
5112 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5113 AArch64Features);
5114
5115 MCSubtargetInfo &STI = copySTI();
5116 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5117 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5118
5119 SmallVector<StringRef, 4> RequestedExtensions;
5120 if (!ExtensionString.empty())
5121 ExtensionString.split(RequestedExtensions, '+');
5122
5123 ExpandCryptoAEK(ID, RequestedExtensions);
5124
5125 FeatureBitset Features = STI.getFeatureBits();
5126 for (auto Name : RequestedExtensions) {
5127 bool EnableFeature = true;
5128
5129 if (Name.startswith_lower("no")) {
5130 EnableFeature = false;
5131 Name = Name.substr(2);
5132 }
5133
5134 for (const auto &Extension : ExtensionMap) {
5135 if (Extension.Name != Name)
5136 continue;
5137
5138 if (Extension.Features.none())
5139 report_fatal_error("unsupported architectural extension: " + Name);
5140
5141 FeatureBitset ToggleFeatures = EnableFeature
5142 ? (~Features & Extension.Features)
5143 : ( Features & Extension.Features);
5144 FeatureBitset Features =
5145 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5146 setAvailableFeatures(Features);
5147 break;
5148 }
5149 }
5150 return false;
5151}
5152
5153/// parseDirectiveArchExtension
5154/// ::= .arch_extension [no]feature
5155bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5156 SMLoc ExtLoc = getLoc();
5157
5158 StringRef Name = getParser().parseStringToEndOfStatement().trim();
5159
5160 if (parseToken(AsmToken::EndOfStatement,
5161 "unexpected token in '.arch_extension' directive"))
5162 return true;
5163
5164 bool EnableFeature = true;
5165 if (Name.startswith_lower("no")) {
5166 EnableFeature = false;
5167 Name = Name.substr(2);
5168 }
5169
5170 MCSubtargetInfo &STI = copySTI();
5171 FeatureBitset Features = STI.getFeatureBits();
5172 for (const auto &Extension : ExtensionMap) {
5173 if (Extension.Name != Name)
5174 continue;
5175
5176 if (Extension.Features.none())
5177 return Error(ExtLoc, "unsupported architectural extension: " + Name);
5178
5179 FeatureBitset ToggleFeatures = EnableFeature
5180 ? (~Features & Extension.Features)
5181 : (Features & Extension.Features);
5182 FeatureBitset Features =
5183 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5184 setAvailableFeatures(Features);
5185 return false;
5186 }
5187
5188 return Error(ExtLoc, "unknown architectural extension: " + Name);
5189}
5190
5191static SMLoc incrementLoc(SMLoc L, int Offset) {
5192 return SMLoc::getFromPointer(L.getPointer() + Offset);
5193}
5194
5195/// parseDirectiveCPU
5196/// ::= .cpu id
5197bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5198 SMLoc CurLoc = getLoc();
5199
5200 StringRef CPU, ExtensionString;
5201 std::tie(CPU, ExtensionString) =
5202 getParser().parseStringToEndOfStatement().trim().split('+');
5203
5204 if (parseToken(AsmToken::EndOfStatement))
5205 return true;
5206
5207 SmallVector<StringRef, 4> RequestedExtensions;
5208 if (!ExtensionString.empty())
5209 ExtensionString.split(RequestedExtensions, '+');
5210
5211 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5212 // once that is tablegen'ed
5213 if (!getSTI().isCPUStringValid(CPU)) {
5214 Error(CurLoc, "unknown CPU name");
5215 return false;
5216 }
5217
5218 MCSubtargetInfo &STI = copySTI();
5219 STI.setDefaultFeatures(CPU, "");
5220 CurLoc = incrementLoc(CurLoc, CPU.size());
5221
5222 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5223
5224 FeatureBitset Features = STI.getFeatureBits();
5225 for (auto Name : RequestedExtensions) {
5226 // Advance source location past '+'.
5227 CurLoc = incrementLoc(CurLoc, 1);
5228
5229 bool EnableFeature = true;
5230
5231 if (Name.startswith_lower("no")) {
5232 EnableFeature = false;
5233 Name = Name.substr(2);
5234 }
5235
5236 bool FoundExtension = false;
5237 for (const auto &Extension : ExtensionMap) {
5238 if (Extension.Name != Name)
5239 continue;
5240
5241 if (Extension.Features.none())
5242 report_fatal_error("unsupported architectural extension: " + Name);
5243
5244 FeatureBitset ToggleFeatures = EnableFeature
5245 ? (~Features & Extension.Features)
5246 : ( Features & Extension.Features);
5247 FeatureBitset Features =
5248 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5249 setAvailableFeatures(Features);
5250 FoundExtension = true;
5251
5252 break;
5253 }
5254
5255 if (!FoundExtension)
5256 Error(CurLoc, "unsupported architectural extension");
5257
5258 CurLoc = incrementLoc(CurLoc, Name.size());
5259 }
5260 return false;
5261}
5262
5263/// parseDirectiveInst
5264/// ::= .inst opcode [, ...]
5265bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5266 if (getLexer().is(AsmToken::EndOfStatement))
5267 return Error(Loc, "expected expression following '.inst' directive");
5268
5269 auto parseOp = [&]() -> bool {
5270 SMLoc L = getLoc();
5271 const MCExpr *Expr;
5272 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5273 return true;
5274 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5275 if (check(!Value, L, "expected constant expression"))
5276 return true;
5277 getTargetStreamer().emitInst(Value->getValue());
5278 return false;
5279 };
5280
5281 if (parseMany(parseOp))
5282 return addErrorSuffix(" in '.inst' directive");
5283 return false;
5284}
5285
5286// parseDirectiveTLSDescCall:
5287// ::= .tlsdesccall symbol
5288bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5289 StringRef Name;
5290 if (check(getParser().parseIdentifier(Name), L,
5291 "expected symbol after directive") ||
5292 parseToken(AsmToken::EndOfStatement))
5293 return true;
5294
5295 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5296 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5297 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5298
5299 MCInst Inst;
5300 Inst.setOpcode(AArch64::TLSDESCCALL);
5301 Inst.addOperand(MCOperand::createExpr(Expr));
5302
5303 getParser().getStreamer().EmitInstruction(Inst, getSTI());
5304 return false;
5305}
5306
5307/// ::= .loh <lohName | lohId> label1, ..., labelN
5308/// The number of arguments depends on the loh identifier.
5309bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5310 MCLOHType Kind;
5311 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5312 if (getParser().getTok().isNot(AsmToken::Integer))
5313 return TokError("expected an identifier or a number in directive");
5314 // We successfully get a numeric value for the identifier.
5315 // Check if it is valid.
5316 int64_t Id = getParser().getTok().getIntVal();
5317 if (Id <= -1U && !isValidMCLOHType(Id))
5318 return TokError("invalid numeric identifier in directive");
5319 Kind = (MCLOHType)Id;
5320 } else {
5321 StringRef Name = getTok().getIdentifier();
5322 // We successfully parse an identifier.
5323 // Check if it is a recognized one.
5324 int Id = MCLOHNameToId(Name);
5325
5326 if (Id == -1)
5327 return TokError("invalid identifier in directive");
5328 Kind = (MCLOHType)Id;
5329 }
5330 // Consume the identifier.
5331 Lex();
5332 // Get the number of arguments of this LOH.
5333 int NbArgs = MCLOHIdToNbArgs(Kind);
5334
5335 assert(NbArgs != -1 && "Invalid number of arguments")((NbArgs != -1 && "Invalid number of arguments") ? static_cast
<void> (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\""
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5335, __PRETTY_FUNCTION__))
;
5336
5337 SmallVector<MCSymbol *, 3> Args;
5338 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5339 StringRef Name;
5340 if (getParser().parseIdentifier(Name))
5341 return TokError("expected identifier in directive");
5342 Args.push_back(getContext().getOrCreateSymbol(Name));
5343
5344 if (Idx + 1 == NbArgs)
5345 break;
5346 if (parseToken(AsmToken::Comma,
5347 "unexpected token in '" + Twine(IDVal) + "' directive"))
5348 return true;
5349 }
5350 if (parseToken(AsmToken::EndOfStatement,
5351 "unexpected token in '" + Twine(IDVal) + "' directive"))
5352 return true;
5353
5354 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5355 return false;
5356}
5357
5358/// parseDirectiveLtorg
5359/// ::= .ltorg | .pool
5360bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5361 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5362 return true;
5363 getTargetStreamer().emitCurrentConstantPool();
5364 return false;
5365}
5366
5367/// parseDirectiveReq
5368/// ::= name .req registername
5369bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5370 MCAsmParser &Parser = getParser();
5371 Parser.Lex(); // Eat the '.req' token.
5372 SMLoc SRegLoc = getLoc();
5373 RegKind RegisterKind = RegKind::Scalar;
5374 unsigned RegNum;
5375 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5376
5377 if (ParseRes != MatchOperand_Success) {
5378 StringRef Kind;
5379 RegisterKind = RegKind::NeonVector;
5380 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5381
5382 if (ParseRes == MatchOperand_ParseFail)
5383 return true;
5384
5385 if (ParseRes == MatchOperand_Success && !Kind.empty())
5386 return Error(SRegLoc, "vector register without type specifier expected");
5387 }
5388
5389 if (ParseRes != MatchOperand_Success) {
5390 StringRef Kind;
5391 RegisterKind = RegKind::SVEDataVector;
5392 ParseRes =
5393 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5394
5395 if (ParseRes == MatchOperand_ParseFail)
5396 return true;
5397
5398 if (ParseRes == MatchOperand_Success && !Kind.empty())
5399 return Error(SRegLoc,
5400 "sve vector register without type specifier expected");
5401 }
5402
5403 if (ParseRes != MatchOperand_Success) {
5404 StringRef Kind;
5405 RegisterKind = RegKind::SVEPredicateVector;
5406 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5407
5408 if (ParseRes == MatchOperand_ParseFail)
5409 return true;
5410
5411 if (ParseRes == MatchOperand_Success && !Kind.empty())
5412 return Error(SRegLoc,
5413 "sve predicate register without type specifier expected");
5414 }
5415
5416 if (ParseRes != MatchOperand_Success)
5417 return Error(SRegLoc, "register name or alias expected");
5418
5419 // Shouldn't be anything else.
5420 if (parseToken(AsmToken::EndOfStatement,
5421 "unexpected input in .req directive"))
5422 return true;
5423
5424 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5425 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5426 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5427
5428 return false;
5429}
5430
5431/// parseDirectiveUneq
5432/// ::= .unreq registername
5433bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5434 MCAsmParser &Parser = getParser();
5435 if (getTok().isNot(AsmToken::Identifier))
5436 return TokError("unexpected input in .unreq directive.");
5437 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5438 Parser.Lex(); // Eat the identifier.
5439 if (parseToken(AsmToken::EndOfStatement))
5440 return addErrorSuffix("in '.unreq' directive");
5441 return false;
5442}
5443
5444bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5445 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5446 return true;
5447 getStreamer().EmitCFINegateRAState();
5448 return false;
5449}
5450
5451/// parseDirectiveCFIBKeyFrame
5452/// ::= .cfi_b_key
5453bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5454 if (parseToken(AsmToken::EndOfStatement,
5455 "unexpected token in '.cfi_b_key_frame'"))
5456 return true;
5457 getStreamer().EmitCFIBKeyFrame();
5458 return false;
5459}
5460
5461bool
5462AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5463 AArch64MCExpr::VariantKind &ELFRefKind,
5464 MCSymbolRefExpr::VariantKind &DarwinRefKind,
5465 int64_t &Addend) {
5466 ELFRefKind = AArch64MCExpr::VK_INVALID;
5467 DarwinRefKind = MCSymbolRefExpr::VK_None;
5468 Addend = 0;
5469
5470 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5471 ELFRefKind = AE->getKind();
5472 Expr = AE->getSubExpr();
5473 }
5474
5475 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5476 if (SE) {
5477 // It's a simple symbol reference with no addend.
5478 DarwinRefKind = SE->getKind();
5479 return true;
5480 }
5481
5482 // Check that it looks like a symbol + an addend
5483 MCValue Res;
5484 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
5485 if (!Relocatable || Res.getSymB())
5486 return false;
5487
5488 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
5489 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
5490 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
5491 return false;
5492
5493 if (Res.getSymA())
5494 DarwinRefKind = Res.getSymA()->getKind();
5495 Addend = Res.getConstant();
5496
5497 // It's some symbol reference + a constant addend, but really
5498 // shouldn't use both Darwin and ELF syntax.
5499 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5500 DarwinRefKind == MCSymbolRefExpr::VK_None;
5501}
5502
5503/// Force static initialization.
5504extern "C" void LLVMInitializeAArch64AsmParser() {
5505 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5506 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5507 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5508}
5509
5510#define GET_REGISTER_MATCHER
5511#define GET_SUBTARGET_FEATURE_NAME
5512#define GET_MATCHER_IMPLEMENTATION
5513#define GET_MNEMONIC_SPELL_CHECKER
5514#include "AArch64GenAsmMatcher.inc"
5515
5516// Define this matcher function after the auto-generated include so we
5517// have the match class enum definitions.
5518unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5519 unsigned Kind) {
5520 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5521 // If the kind is a token for a literal immediate, check if our asm
5522 // operand matches. This is for InstAliases which have a fixed-value
5523 // immediate in the syntax.
5524 int64_t ExpectedVal;
5525 switch (Kind) {
5526 default:
5527 return Match_InvalidOperand;
5528 case MCK__35_0:
5529 ExpectedVal = 0;
5530 break;
5531 case MCK__35_1:
5532 ExpectedVal = 1;
5533 break;
5534 case MCK__35_12:
5535 ExpectedVal = 12;
5536 break;
5537 case MCK__35_16:
5538 ExpectedVal = 16;
5539 break;
5540 case MCK__35_2:
5541 ExpectedVal = 2;
5542 break;
5543 case MCK__35_24:
5544 ExpectedVal = 24;
5545 break;
5546 case MCK__35_3:
5547 ExpectedVal = 3;
5548 break;
5549 case MCK__35_32:
5550 ExpectedVal = 32;
5551 break;
5552 case MCK__35_4:
5553 ExpectedVal = 4;
5554 break;
5555 case MCK__35_48:
5556 ExpectedVal = 48;
5557 break;
5558 case MCK__35_6:
5559 ExpectedVal = 6;
5560 break;
5561 case MCK__35_64:
5562 ExpectedVal = 64;
5563 break;
5564 case MCK__35_8:
5565 ExpectedVal = 8;
5566 break;
5567 }
5568 if (!Op.isImm())
5569 return Match_InvalidOperand;
5570 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5571 if (!CE)
5572 return Match_InvalidOperand;
5573 if (CE->getValue() == ExpectedVal)
5574 return Match_Success;
5575 return Match_InvalidOperand;
5576}
5577
/// Parse a sequential GPR pair operand (e.g. for CASP): two consecutive
/// same-width registers, the first with an even hardware encoding and the
/// second with the next (odd) encoding.  On success pushes one operand
/// holding the matching XSeqPairs/WSeqPairs super-register.
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  // The pair must start with a register identifier.
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register fixes the width (W or X) for the whole pair.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // The first register's hardware encoding must be even.
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  // The second register must have the next encoding and the same width.
  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
            "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Map the even register to the sequential-pair super-register covering
  // both halves; the pair is represented by that single register.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
5650
5651template <bool ParseShiftExtend, bool ParseSuffix>
5652OperandMatchResultTy
5653AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5654 const SMLoc S = getLoc();
5655 // Check for a SVE vector register specifier first.
5656 unsigned RegNum;
5657 StringRef Kind;
5658
5659 OperandMatchResultTy Res =
5660 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5661
5662 if (Res != MatchOperand_Success)
5663 return Res;
5664
5665 if (ParseSuffix && Kind.empty())
5666 return MatchOperand_NoMatch;
5667
5668 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5669 if (!KindRes)
5670 return MatchOperand_NoMatch;
5671
5672 unsigned ElementWidth = KindRes->second;
5673
5674 // No shift/extend is the default.
5675 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5676 Operands.push_back(AArch64Operand::CreateVectorReg(
5677 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5678
5679 OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5680 if (Res == MatchOperand_ParseFail)
5681 return MatchOperand_ParseFail;
5682 return MatchOperand_Success;
5683 }
5684
5685 // Eat the comma
5686 getParser().Lex();
5687
5688 // Match the shift
5689 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5690 Res = tryParseOptionalShiftExtend(ExtOpnd);
5691 if (Res != MatchOperand_Success)
5692 return Res;
5693
5694 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5695 Operands.push_back(AArch64Operand::CreateVectorReg(
5696 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5697 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5698 Ext->hasShiftExtendAmount()));
5699
5700 return MatchOperand_Success;
5701}
5702
5703OperandMatchResultTy
5704AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5705 MCAsmParser &Parser = getParser();
5706
5707 SMLoc SS = getLoc();
5708 const AsmToken &TokE = Parser.getTok();
5709 bool IsHash = TokE.is(AsmToken::Hash);
5710
5711 if (!IsHash && TokE.isNot(AsmToken::Identifier))
5712 return MatchOperand_NoMatch;
5713
5714 int64_t Pattern;
5715 if (IsHash) {
5716 Parser.Lex(); // Eat hash
5717
5718 // Parse the immediate operand.
5719 const MCExpr *ImmVal;
5720 SS = getLoc();
5721 if (Parser.parseExpression(ImmVal))
5722 return MatchOperand_ParseFail;
5723
5724 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5725 if (!MCE)
5726 return MatchOperand_ParseFail;
5727
5728 Pattern = MCE->getValue();
5729 } else {
5730 // Parse the pattern
5731 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5732 if (!Pat)
5733 return MatchOperand_NoMatch;
5734
5735 Parser.Lex();
5736 Pattern = Pat->Encoding;
5737 assert(Pattern >= 0 && Pattern < 32)((Pattern >= 0 && Pattern < 32) ? static_cast<
void> (0) : __assert_fail ("Pattern >= 0 && Pattern < 32"
, "/build/llvm-toolchain-snapshot-9~svn358520/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5737, __PRETTY_FUNCTION__))
;
5738 }
5739
5740 Operands.push_back(
5741 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5742 SS, getLoc(), getContext()));
5743
5744 return MatchOperand_Success;
5745}