Bug Summary

File: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 2540, column 7
1st function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn350071/include -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/.. 
-U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/lib/Target/AArch64/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-8~svn350071=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-12-27-042839-1215-1 -x c++ /build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp -faddrsig
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "AArch64InstrInfo.h"
15#include "Utils/AArch64BaseInfo.h"
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/MC/MCContext.h"
27#include "llvm/MC/MCExpr.h"
28#include "llvm/MC/MCInst.h"
29#include "llvm/MC/MCLinkerOptimizationHint.h"
30#include "llvm/MC/MCObjectFileInfo.h"
31#include "llvm/MC/MCParser/MCAsmLexer.h"
32#include "llvm/MC/MCParser/MCAsmParser.h"
33#include "llvm/MC/MCParser/MCAsmParserExtension.h"
34#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35#include "llvm/MC/MCParser/MCTargetAsmParser.h"
36#include "llvm/MC/MCRegisterInfo.h"
37#include "llvm/MC/MCStreamer.h"
38#include "llvm/MC/MCSubtargetInfo.h"
39#include "llvm/MC/MCSymbol.h"
40#include "llvm/MC/MCTargetOptions.h"
41#include "llvm/MC/SubtargetFeature.h"
42#include "llvm/MC/MCValue.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/Compiler.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/MathExtras.h"
47#include "llvm/Support/SMLoc.h"
48#include "llvm/Support/TargetParser.h"
49#include "llvm/Support/TargetRegistry.h"
50#include "llvm/Support/raw_ostream.h"
51#include <cassert>
52#include <cctype>
53#include <cstdint>
54#include <cstdio>
55#include <string>
56#include <tuple>
57#include <utility>
58#include <vector>
59
60using namespace llvm;
61
62namespace {
63
64enum class RegKind {
65 Scalar,
66 NeonVector,
67 SVEDataVector,
68 SVEPredicateVector
69};
70
71enum RegConstraintEqualityTy {
72 EqualsReg,
73 EqualsSuperReg,
74 EqualsSubReg
75};
76
77class AArch64AsmParser : public MCTargetAsmParser {
78private:
79 StringRef Mnemonic; ///< Instruction mnemonic.
80
81 // Map of register aliases registers via the .req directive.
82 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
83
84 class PrefixInfo {
85 public:
86 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
87 PrefixInfo Prefix;
88 switch (Inst.getOpcode()) {
89 case AArch64::MOVPRFX_ZZ:
90 Prefix.Active = true;
91 Prefix.Dst = Inst.getOperand(0).getReg();
92 break;
93 case AArch64::MOVPRFX_ZPmZ_B:
94 case AArch64::MOVPRFX_ZPmZ_H:
95 case AArch64::MOVPRFX_ZPmZ_S:
96 case AArch64::MOVPRFX_ZPmZ_D:
97 Prefix.Active = true;
98 Prefix.Predicated = true;
99 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
100 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 101, __PRETTY_FUNCTION__))
101 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 101, __PRETTY_FUNCTION__))
;
102 Prefix.Dst = Inst.getOperand(0).getReg();
103 Prefix.Pg = Inst.getOperand(2).getReg();
104 break;
105 case AArch64::MOVPRFX_ZPzZ_B:
106 case AArch64::MOVPRFX_ZPzZ_H:
107 case AArch64::MOVPRFX_ZPzZ_S:
108 case AArch64::MOVPRFX_ZPzZ_D:
109 Prefix.Active = true;
110 Prefix.Predicated = true;
111 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
112 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 113, __PRETTY_FUNCTION__))
113 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 113, __PRETTY_FUNCTION__))
;
114 Prefix.Dst = Inst.getOperand(0).getReg();
115 Prefix.Pg = Inst.getOperand(1).getReg();
116 break;
117 default:
118 break;
119 }
120
121 return Prefix;
122 }
123
124 PrefixInfo() : Active(false), Predicated(false) {}
125 bool isActive() const { return Active; }
126 bool isPredicated() const { return Predicated; }
127 unsigned getElementSize() const {
128 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 128, __PRETTY_FUNCTION__))
;
129 return ElementSize;
130 }
131 unsigned getDstReg() const { return Dst; }
132 unsigned getPgReg() const {
133 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 133, __PRETTY_FUNCTION__))
;
134 return Pg;
135 }
136
137 private:
138 bool Active;
139 bool Predicated;
140 unsigned ElementSize;
141 unsigned Dst;
142 unsigned Pg;
143 } NextPrefix;
144
145 AArch64TargetStreamer &getTargetStreamer() {
146 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
147 return static_cast<AArch64TargetStreamer &>(TS);
148 }
149
150 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
151
152 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
153 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
154 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
155 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
156 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
157 bool parseRegister(OperandVector &Operands);
158 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
159 bool parseNeonVectorList(OperandVector &Operands);
160 bool parseOptionalMulOperand(OperandVector &Operands);
161 bool parseOperand(OperandVector &Operands, bool isCondCode,
162 bool invertCondCode);
163
164 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
165 OperandVector &Operands);
166
167 bool parseDirectiveArch(SMLoc L);
168 bool parseDirectiveCPU(SMLoc L);
169 bool parseDirectiveInst(SMLoc L);
170
171 bool parseDirectiveTLSDescCall(SMLoc L);
172
173 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
174 bool parseDirectiveLtorg(SMLoc L);
175
176 bool parseDirectiveReq(StringRef Name, SMLoc L);
177 bool parseDirectiveUnreq(SMLoc L);
178 bool parseDirectiveCFINegateRAState();
179 bool parseDirectiveCFIBKeyFrame();
180
181 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
182 SmallVectorImpl<SMLoc> &Loc);
183 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
184 OperandVector &Operands, MCStreamer &Out,
185 uint64_t &ErrorInfo,
186 bool MatchingInlineAsm) override;
187/// @name Auto-generated Match Functions
188/// {
189
190#define GET_ASSEMBLER_HEADER
191#include "AArch64GenAsmMatcher.inc"
192
193 /// }
194
195 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
196 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
197 RegKind MatchKind);
198 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
199 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
200 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
201 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
202 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
203 template <bool IsSVEPrefetch = false>
204 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
205 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
206 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
207 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
208 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
209 template<bool AddFPZeroAsLiteral>
210 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
211 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
212 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
213 bool tryParseNeonVectorRegister(OperandVector &Operands);
214 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
215 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
216 template <bool ParseShiftExtend,
217 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
218 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
219 template <bool ParseShiftExtend, bool ParseSuffix>
220 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
221 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
222 template <RegKind VectorKind>
223 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
224 bool ExpectMatch = false);
225 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
226
227public:
228 enum AArch64MatchResultTy {
229 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
230#define GET_OPERAND_DIAGNOSTIC_TYPES
231#include "AArch64GenAsmMatcher.inc"
232 };
233 bool IsILP32;
234
235 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
236 const MCInstrInfo &MII, const MCTargetOptions &Options)
237 : MCTargetAsmParser(Options, STI, MII) {
238 IsILP32 = Options.getABIName() == "ilp32";
239 MCAsmParserExtension::Initialize(Parser);
240 MCStreamer &S = getParser().getStreamer();
241 if (S.getTargetStreamer() == nullptr)
242 new AArch64TargetStreamer(S);
243
244 // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
245 // directives as they have the same form and semantics:
246 /// ::= (.hword | .word | .xword ) [ expression (, expression)* ]
247 Parser.addAliasForDirective(".hword", ".2byte");
248 Parser.addAliasForDirective(".word", ".4byte");
249 Parser.addAliasForDirective(".xword", ".8byte");
250
251 // Initialize the set of available features.
252 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
253 }
254
255 bool regsEqual(const MCParsedAsmOperand &Op1,
256 const MCParsedAsmOperand &Op2) const override;
257 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
258 SMLoc NameLoc, OperandVector &Operands) override;
259 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
260 bool ParseDirective(AsmToken DirectiveID) override;
261 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
262 unsigned Kind) override;
263
264 static bool classifySymbolRef(const MCExpr *Expr,
265 AArch64MCExpr::VariantKind &ELFRefKind,
266 MCSymbolRefExpr::VariantKind &DarwinRefKind,
267 int64_t &Addend);
268};
269
270/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
271/// instruction.
272class AArch64Operand : public MCParsedAsmOperand {
273private:
274 enum KindTy {
275 k_Immediate,
276 k_ShiftedImm,
277 k_CondCode,
278 k_Register,
279 k_VectorList,
280 k_VectorIndex,
281 k_Token,
282 k_SysReg,
283 k_SysCR,
284 k_Prefetch,
285 k_ShiftExtend,
286 k_FPImm,
287 k_Barrier,
288 k_PSBHint,
289 k_BTIHint,
290 } Kind;
291
292 SMLoc StartLoc, EndLoc;
293
294 struct TokOp {
295 const char *Data;
296 unsigned Length;
297 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
298 };
299
300 // Separate shift/extend operand.
301 struct ShiftExtendOp {
302 AArch64_AM::ShiftExtendType Type;
303 unsigned Amount;
304 bool HasExplicitAmount;
305 };
306
307 struct RegOp {
308 unsigned RegNum;
309 RegKind Kind;
310 int ElementWidth;
311
312 // The register may be allowed as a different register class,
313 // e.g. for GPR64as32 or GPR32as64.
314 RegConstraintEqualityTy EqualityTy;
315
316 // In some cases the shift/extend needs to be explicitly parsed together
317 // with the register, rather than as a separate operand. This is needed
318 // for addressing modes where the instruction as a whole dictates the
319 // scaling/extend, rather than specific bits in the instruction.
320 // By parsing them as a single operand, we avoid the need to pass an
321 // extra operand in all CodeGen patterns (because all operands need to
322 // have an associated value), and we avoid the need to update TableGen to
323 // accept operands that have no associated bits in the instruction.
324 //
325 // An added benefit of parsing them together is that the assembler
326 // can give a sensible diagnostic if the scaling is not correct.
327 //
328 // The default is 'lsl #0' (HasExplicitAmount = false) if no
329 // ShiftExtend is specified.
330 ShiftExtendOp ShiftExtend;
331 };
332
333 struct VectorListOp {
334 unsigned RegNum;
335 unsigned Count;
336 unsigned NumElements;
337 unsigned ElementWidth;
338 RegKind RegisterKind;
339 };
340
341 struct VectorIndexOp {
342 unsigned Val;
343 };
344
345 struct ImmOp {
346 const MCExpr *Val;
347 };
348
349 struct ShiftedImmOp {
350 const MCExpr *Val;
351 unsigned ShiftAmount;
352 };
353
354 struct CondCodeOp {
355 AArch64CC::CondCode Code;
356 };
357
358 struct FPImmOp {
359 uint64_t Val; // APFloat value bitcasted to uint64_t.
360 bool IsExact; // describes whether parsed value was exact.
361 };
362
363 struct BarrierOp {
364 const char *Data;
365 unsigned Length;
366 unsigned Val; // Not the enum since not all values have names.
367 };
368
369 struct SysRegOp {
370 const char *Data;
371 unsigned Length;
372 uint32_t MRSReg;
373 uint32_t MSRReg;
374 uint32_t PStateField;
375 };
376
377 struct SysCRImmOp {
378 unsigned Val;
379 };
380
381 struct PrefetchOp {
382 const char *Data;
383 unsigned Length;
384 unsigned Val;
385 };
386
387 struct PSBHintOp {
388 const char *Data;
389 unsigned Length;
390 unsigned Val;
391 };
392
393 struct BTIHintOp {
394 const char *Data;
395 unsigned Length;
396 unsigned Val;
397 };
398
399 struct ExtendOp {
400 unsigned Val;
401 };
402
403 union {
404 struct TokOp Tok;
405 struct RegOp Reg;
406 struct VectorListOp VectorList;
407 struct VectorIndexOp VectorIndex;
408 struct ImmOp Imm;
409 struct ShiftedImmOp ShiftedImm;
410 struct CondCodeOp CondCode;
411 struct FPImmOp FPImm;
412 struct BarrierOp Barrier;
413 struct SysRegOp SysReg;
414 struct SysCRImmOp SysCRImm;
415 struct PrefetchOp Prefetch;
416 struct PSBHintOp PSBHint;
417 struct BTIHintOp BTIHint;
418 struct ShiftExtendOp ShiftExtend;
419 };
420
421 // Keep the MCContext around as the MCExprs may need manipulated during
422 // the add<>Operands() calls.
423 MCContext &Ctx;
424
425public:
426 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
427
428 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
429 Kind = o.Kind;
430 StartLoc = o.StartLoc;
431 EndLoc = o.EndLoc;
432 switch (Kind) {
433 case k_Token:
434 Tok = o.Tok;
435 break;
436 case k_Immediate:
437 Imm = o.Imm;
438 break;
439 case k_ShiftedImm:
440 ShiftedImm = o.ShiftedImm;
441 break;
442 case k_CondCode:
443 CondCode = o.CondCode;
444 break;
445 case k_FPImm:
446 FPImm = o.FPImm;
447 break;
448 case k_Barrier:
449 Barrier = o.Barrier;
450 break;
451 case k_Register:
452 Reg = o.Reg;
453 break;
454 case k_VectorList:
455 VectorList = o.VectorList;
456 break;
457 case k_VectorIndex:
458 VectorIndex = o.VectorIndex;
459 break;
460 case k_SysReg:
461 SysReg = o.SysReg;
462 break;
463 case k_SysCR:
464 SysCRImm = o.SysCRImm;
465 break;
466 case k_Prefetch:
467 Prefetch = o.Prefetch;
468 break;
469 case k_PSBHint:
470 PSBHint = o.PSBHint;
471 break;
472 case k_BTIHint:
473 BTIHint = o.BTIHint;
474 break;
475 case k_ShiftExtend:
476 ShiftExtend = o.ShiftExtend;
477 break;
478 }
479 }
480
481 /// getStartLoc - Get the location of the first token of this operand.
482 SMLoc getStartLoc() const override { return StartLoc; }
483 /// getEndLoc - Get the location of the last token of this operand.
484 SMLoc getEndLoc() const override { return EndLoc; }
485
486 StringRef getToken() const {
487 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 487, __PRETTY_FUNCTION__))
;
488 return StringRef(Tok.Data, Tok.Length);
489 }
490
491 bool isTokenSuffix() const {
492 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 492, __PRETTY_FUNCTION__))
;
493 return Tok.IsSuffix;
494 }
495
496 const MCExpr *getImm() const {
497 assert(Kind == k_Immediate && "Invalid access!")((Kind == k_Immediate && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 497, __PRETTY_FUNCTION__))
;
498 return Imm.Val;
499 }
500
501 const MCExpr *getShiftedImmVal() const {
502 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 502, __PRETTY_FUNCTION__))
;
503 return ShiftedImm.Val;
504 }
505
506 unsigned getShiftedImmShift() const {
507 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 507, __PRETTY_FUNCTION__))
;
508 return ShiftedImm.ShiftAmount;
509 }
510
511 AArch64CC::CondCode getCondCode() const {
512 assert(Kind == k_CondCode && "Invalid access!")((Kind == k_CondCode && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 512, __PRETTY_FUNCTION__))
;
513 return CondCode.Code;
514 }
515
516 APFloat getFPImm() const {
517 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 517, __PRETTY_FUNCTION__))
;
518 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
519 }
520
521 bool getFPImmIsExact() const {
522 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 522, __PRETTY_FUNCTION__))
;
523 return FPImm.IsExact;
524 }
525
526 unsigned getBarrier() const {
527 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 527, __PRETTY_FUNCTION__))
;
528 return Barrier.Val;
529 }
530
531 StringRef getBarrierName() const {
532 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 532, __PRETTY_FUNCTION__))
;
533 return StringRef(Barrier.Data, Barrier.Length);
534 }
535
536 unsigned getReg() const override {
537 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 537, __PRETTY_FUNCTION__))
;
538 return Reg.RegNum;
539 }
540
541 RegConstraintEqualityTy getRegEqualityTy() const {
542 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 542, __PRETTY_FUNCTION__))
;
543 return Reg.EqualityTy;
544 }
545
546 unsigned getVectorListStart() const {
547 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 547, __PRETTY_FUNCTION__))
;
548 return VectorList.RegNum;
549 }
550
551 unsigned getVectorListCount() const {
552 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 552, __PRETTY_FUNCTION__))
;
553 return VectorList.Count;
554 }
555
556 unsigned getVectorIndex() const {
557 assert(Kind == k_VectorIndex && "Invalid access!")((Kind == k_VectorIndex && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 557, __PRETTY_FUNCTION__))
;
558 return VectorIndex.Val;
559 }
560
561 StringRef getSysReg() const {
562 assert(Kind == k_SysReg && "Invalid access!")((Kind == k_SysReg && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 562, __PRETTY_FUNCTION__))
;
563 return StringRef(SysReg.Data, SysReg.Length);
564 }
565
566 unsigned getSysCR() const {
567 assert(Kind == k_SysCR && "Invalid access!")((Kind == k_SysCR && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 567, __PRETTY_FUNCTION__))
;
568 return SysCRImm.Val;
569 }
570
571 unsigned getPrefetch() const {
572 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 572, __PRETTY_FUNCTION__))
;
573 return Prefetch.Val;
574 }
575
576 unsigned getPSBHint() const {
577 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 577, __PRETTY_FUNCTION__))
;
578 return PSBHint.Val;
579 }
580
581 StringRef getPSBHintName() const {
582 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 582, __PRETTY_FUNCTION__))
;
583 return StringRef(PSBHint.Data, PSBHint.Length);
584 }
585
586 unsigned getBTIHint() const {
587 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 587, __PRETTY_FUNCTION__))
;
588 return BTIHint.Val;
589 }
590
591 StringRef getBTIHintName() const {
592 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 592, __PRETTY_FUNCTION__))
;
593 return StringRef(BTIHint.Data, BTIHint.Length);
594 }
595
596 StringRef getPrefetchName() const {
597 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 597, __PRETTY_FUNCTION__))
;
598 return StringRef(Prefetch.Data, Prefetch.Length);
599 }
600
601 AArch64_AM::ShiftExtendType getShiftExtendType() const {
602 if (Kind == k_ShiftExtend)
603 return ShiftExtend.Type;
604 if (Kind == k_Register)
605 return Reg.ShiftExtend.Type;
606 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 606)
;
607 }
608
609 unsigned getShiftExtendAmount() const {
610 if (Kind == k_ShiftExtend)
611 return ShiftExtend.Amount;
612 if (Kind == k_Register)
613 return Reg.ShiftExtend.Amount;
614 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 614)
;
615 }
616
617 bool hasShiftExtendAmount() const {
618 if (Kind == k_ShiftExtend)
619 return ShiftExtend.HasExplicitAmount;
620 if (Kind == k_Register)
621 return Reg.ShiftExtend.HasExplicitAmount;
622 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 622)
;
623 }
624
625 bool isImm() const override { return Kind == k_Immediate; }
626 bool isMem() const override { return false; }
627
628 bool isUImm6() const {
629 if (!isImm())
630 return false;
631 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
632 if (!MCE)
633 return false;
634 int64_t Val = MCE->getValue();
635 return (Val >= 0 && Val < 64);
636 }
637
638 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
639
640 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
641 return isImmScaled<Bits, Scale>(true);
642 }
643
644 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
645 return isImmScaled<Bits, Scale>(false);
646 }
647
648 template <int Bits, int Scale>
649 DiagnosticPredicate isImmScaled(bool Signed) const {
650 if (!isImm())
651 return DiagnosticPredicateTy::NoMatch;
652
653 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
654 if (!MCE)
655 return DiagnosticPredicateTy::NoMatch;
656
657 int64_t MinVal, MaxVal;
658 if (Signed) {
659 int64_t Shift = Bits - 1;
660 MinVal = (int64_t(1) << Shift) * -Scale;
661 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
662 } else {
663 MinVal = 0;
664 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
665 }
666
667 int64_t Val = MCE->getValue();
668 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
669 return DiagnosticPredicateTy::Match;
670
671 return DiagnosticPredicateTy::NearMatch;
672 }
673
674 DiagnosticPredicate isSVEPattern() const {
675 if (!isImm())
676 return DiagnosticPredicateTy::NoMatch;
677 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
678 if (!MCE)
679 return DiagnosticPredicateTy::NoMatch;
680 int64_t Val = MCE->getValue();
681 if (Val >= 0 && Val < 32)
682 return DiagnosticPredicateTy::Match;
683 return DiagnosticPredicateTy::NearMatch;
684 }
685
686 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
687 AArch64MCExpr::VariantKind ELFRefKind;
688 MCSymbolRefExpr::VariantKind DarwinRefKind;
689 int64_t Addend;
690 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
691 Addend)) {
692 // If we don't understand the expression, assume the best and
693 // let the fixup and relocation code deal with it.
694 return true;
695 }
696
697 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
698 ELFRefKind == AArch64MCExpr::VK_LO12 ||
699 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
700 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
701 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
702 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
703 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
704 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
705 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
706 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
707 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
708 // Note that we don't range-check the addend. It's adjusted modulo page
709 // size when converted, so there is no "out of range" condition when using
710 // @pageoff.
711 return true;
712 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
713 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
714 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
715 return Addend == 0;
716 }
717
718 return false;
719 }
720
721 template <int Scale> bool isUImm12Offset() const {
722 if (!isImm())
723 return false;
724
725 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726 if (!MCE)
727 return isSymbolicUImm12Offset(getImm());
728
729 int64_t Val = MCE->getValue();
730 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
731 }
732
733 template <int N, int M>
734 bool isImmInRange() const {
735 if (!isImm())
736 return false;
737 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
738 if (!MCE)
739 return false;
740 int64_t Val = MCE->getValue();
741 return (Val >= N && Val <= M);
742 }
743
744 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
745 // a logical immediate can always be represented when inverted.
746 template <typename T>
747 bool isLogicalImm() const {
748 if (!isImm())
749 return false;
750 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
751 if (!MCE)
752 return false;
753
754 int64_t Val = MCE->getValue();
755 int64_t SVal = typename std::make_signed<T>::type(Val);
756 int64_t UVal = typename std::make_unsigned<T>::type(Val);
757 if (Val != SVal && Val != UVal)
758 return false;
759
760 return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
761 }
762
// True iff this operand was parsed as "imm, lsl #N" (k_ShiftedImm).
763 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
764
765 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
766 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
767 /// immediate that can be shifted by 'Shift'.
768 template <unsigned Width>
769 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
// Explicitly shifted form: only accept it when the written shift equals
// Width and the payload is a constant.
770 if (isShiftedImm() && Width == getShiftedImmShift())
771 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
772 return std::make_pair(CE->getValue(), Width);
773
// Plain constant: report it as (value >> Width, Width) when the low Width
// bits are zero (i.e. it is exactly representable shifted), else as an
// unshifted (value, 0) pair.
774 if (isImm())
775 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
776 int64_t Val = CE->getValue();
777 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
778 return std::make_pair(Val >> Width, Width);
779 else
780 return std::make_pair(Val, 0u);
781 }
782
// Not a constant immediate in either form.
783 return {};
784 }
785
// True iff the operand is valid as an ADD/SUB immediate: a symbolic
// page-offset style expression, or a constant representable as a 12-bit
// value optionally shifted left by 12.
786 bool isAddSubImm() const {
787 if (!isShiftedImm() && !isImm())
788 return false;
789
790 const MCExpr *Expr;
791
792 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
793 if (isShiftedImm()) {
794 unsigned Shift = ShiftedImm.ShiftAmount;
795 Expr = ShiftedImm.Val;
796 if (Shift != 0 && Shift != 12)
797 return false;
798 } else {
799 Expr = getImm();
800 }
801
// Symbolic expressions: accept the relocation kinds that lower to the
// :lo12:/page-offset family handled by ADD/SUB fixups.
802 AArch64MCExpr::VariantKind ELFRefKind;
803 MCSymbolRefExpr::VariantKind DarwinRefKind;
804 int64_t Addend;
805 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
806 DarwinRefKind, Addend)) {
807 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
808 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
809 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
810 || ELFRefKind == AArch64MCExpr::VK_LO12
811 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
812 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
813 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
814 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
815 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
816 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
817 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
818 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
819 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
820 }
821
822 // If it's a constant, it should be a real immediate in range.
823 if (auto ShiftedVal = getShiftedVal<12>())
824 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
825
826 // If it's an expression, we hope for the best and let the fixup/relocation
827 // code deal with it.
828 return true;
829 }
830
831 bool isAddSubImmNeg() const {
832 if (!isShiftedImm() && !isImm())
833 return false;
834
835 // Otherwise it should be a real negative immediate in range.
836 if (auto ShiftedVal = getShiftedVal<12>())
837 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
838
839 return false;
840 }
841
842 // Signed value in the range -128 to +127. For element widths of
843 // 16 bits or higher it may also be a signed multiple of 256 in the
844 // range -32768 to +32512.
845 // For element-width of 8 bits a range of -128 to 255 is accepted,
846 // since a copy of a byte can be either signed/unsigned.
847 template <typename T>
848 DiagnosticPredicate isSVECpyImm() const {
849 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
850 return DiagnosticPredicateTy::NoMatch;
851
// Byte elements cannot use the "lsl #8" shifted form; reject it before
// delegating range checking to AArch64_AM::isSVECpyImm.
852 bool IsByte =
853 std::is_same<int8_t, typename std::make_signed<T>::type>::value;
854 if (auto ShiftedImm = getShiftedVal<8>())
855 if (!(IsByte && ShiftedImm->second) &&
856 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
857 << ShiftedImm->second))
858 return DiagnosticPredicateTy::Match;
859
860 return DiagnosticPredicateTy::NearMatch;
861 }
862
863 // Unsigned value in the range 0 to 255. For element widths of
864 // 16 bits or higher it may also be a signed multiple of 256 in the
865 // range 0 to 65280.
866 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
867 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
868 return DiagnosticPredicateTy::NoMatch;
869
// As with isSVECpyImm: byte elements may not use the shifted form.
870 bool IsByte =
871 std::is_same<int8_t, typename std::make_signed<T>::type>::value;
872 if (auto ShiftedImm = getShiftedVal<8>())
873 if (!(IsByte && ShiftedImm->second) &&
874 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
875 << ShiftedImm->second))
876 return DiagnosticPredicateTy::Match;
877
878 return DiagnosticPredicateTy::NearMatch;
879 }
880
881 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
882 if (isLogicalImm<T>() && !isSVECpyImm<T>())
883 return DiagnosticPredicateTy::Match;
884 return DiagnosticPredicateTy::NoMatch;
885 }
886
// True iff this operand is a condition code (eq, ne, ...).
887 bool isCondCode() const { return Kind == k_CondCode; }
888
// Constant immediate encodable as AdvSIMD modified-immediate type 10
// (the 64-bit byte-mask form used by e.g. MOVI d0, #imm).
889 bool isSIMDImmType10() const {
890 if (!isImm())
891 return false;
892 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
893 if (!MCE)
894 return false;
895 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
896 }
897
898 template<int N>
899 bool isBranchTarget() const {
900 if (!isImm())
901 return false;
902 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
903 if (!MCE)
904 return true;
905 int64_t Val = MCE->getValue();
906 if (Val & 0x3)
907 return false;
908 assert(N > 0 && "Branch target immediate cannot be 0 bits!")((N > 0 && "Branch target immediate cannot be 0 bits!"
) ? static_cast<void> (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 908, __PRETTY_FUNCTION__))
;
909 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
910 }
911
912 bool
913 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
914 if (!isImm())
915 return false;
916
917 AArch64MCExpr::VariantKind ELFRefKind;
918 MCSymbolRefExpr::VariantKind DarwinRefKind;
919 int64_t Addend;
920 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
921 DarwinRefKind, Addend)) {
922 return false;
923 }
924 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
925 return false;
926
927 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
928 if (ELFRefKind == AllowedModifiers[i])
929 return true;
930 }
931
932 return false;
933 }
934
// The isMovZSymbolGn/isMovKSymbolGn predicates below accept the symbol
// modifiers valid for each 16-bit chunk (G3..G0) of MOVZ/MOVK. MOVZ forms
// take the signed/overflow-checked variants; MOVK forms take the _NC
// (no-check) variants.
935 bool isMovZSymbolG3() const {
936 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
937 }
938
939 bool isMovZSymbolG2() const {
940 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
941 AArch64MCExpr::VK_TPREL_G2,
942 AArch64MCExpr::VK_DTPREL_G2});
943 }
944
945 bool isMovZSymbolG1() const {
946 return isMovWSymbol({
947 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
948 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
949 AArch64MCExpr::VK_DTPREL_G1,
950 });
951 }
952
953 bool isMovZSymbolG0() const {
954 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
955 AArch64MCExpr::VK_TPREL_G0,
956 AArch64MCExpr::VK_DTPREL_G0});
957 }
958
959 bool isMovKSymbolG3() const {
960 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
961 }
962
963 bool isMovKSymbolG2() const {
964 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
965 }
966
967 bool isMovKSymbolG1() const {
968 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
969 AArch64MCExpr::VK_TPREL_G1_NC,
970 AArch64MCExpr::VK_DTPREL_G1_NC});
971 }
972
973 bool isMovKSymbolG0() const {
974 return isMovWSymbol(
975 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
976 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
977 }
978
979 template<int RegWidth, int Shift>
980 bool isMOVZMovAlias() const {
981 if (!isImm()) return false;
982
983 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
984 if (!CE) return false;
985 uint64_t Value = CE->getValue();
986
987 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
988 }
989
990 template<int RegWidth, int Shift>
991 bool isMOVNMovAlias() const {
992 if (!isImm()) return false;
993
994 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
995 if (!CE) return false;
996 uint64_t Value = CE->getValue();
997
998 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
999 }
1000
// FP immediate encodable in the 8-bit FMOV immediate format.
1001 bool isFPImm() const {
1002 return Kind == k_FPImm &&
1003 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1004 }
1005
1006 bool isBarrier() const { return Kind == k_Barrier; }
1007 bool isSysReg() const { return Kind == k_SysReg; }
1008
// System register readable via MRS (-1U marks "no MRS encoding").
1009 bool isMRSSystemRegister() const {
1010 if (!isSysReg()) return false;
1011
1012 return SysReg.MRSReg != -1U;
1013 }
1014
// System register writable via MSR (-1U marks "no MSR encoding").
1015 bool isMSRSystemRegister() const {
1016 if (!isSysReg()) return false;
1017 return SysReg.MSRReg != -1U;
1018 }
1019
// PSTATE fields whose MSR-immediate form takes only a 1-bit value.
1020 bool isSystemPStateFieldWithImm0_1() const {
1021 if (!isSysReg()) return false;
1022 return (SysReg.PStateField == AArch64PState::PAN ||
1023 SysReg.PStateField == AArch64PState::DIT ||
1024 SysReg.PStateField == AArch64PState::UAO ||
1025 SysReg.PStateField == AArch64PState::SSBS);
1026 }
1027
// All other PSTATE fields take a 4-bit immediate (0-15).
1028 bool isSystemPStateFieldWithImm0_15() const {
1029 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1030 return SysReg.PStateField != -1U;
1031 }
1032
1033 bool isReg() const override {
1034 return Kind == k_Register;
1035 }
1036
// General-purpose or FP scalar register (w/x/b/h/s/d/q forms).
1037 bool isScalarReg() const {
1038 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1039 }
1040
1041 bool isNeonVectorReg() const {
1042 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1043 }
1044
// NEON vector register restricted to V0-V15 (FPR128_lo), as required by
// some by-element instruction encodings.
1045 bool isNeonVectorRegLo() const {
1046 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1047 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1048 Reg.RegNum);
1049 }
1050
1051 template <unsigned Class> bool isSVEVectorReg() const {
1052 RegKind RK;
1053 switch (Class) {
1054 case AArch64::ZPRRegClassID:
1055 case AArch64::ZPR_3bRegClassID:
1056 case AArch64::ZPR_4bRegClassID:
1057 RK = RegKind::SVEDataVector;
1058 break;
1059 case AArch64::PPRRegClassID:
1060 case AArch64::PPR_3bRegClassID:
1061 RK = RegKind::SVEPredicateVector;
1062 break;
1063 default:
1064 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1064)
;
1065 }
1066
1067 return (Kind == k_Register && Reg.Kind == RK) &&
1068 AArch64MCRegisterClasses[Class].contains(getReg());
1069 }
1070
// Scalar FP register used where a Z register is expected (FPR-as-ZPR
// aliasing, e.g. "mov z0.d, d0" style operands).
1071 template <unsigned Class> bool isFPRasZPR() const {
1072 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1073 AArch64MCRegisterClasses[Class].contains(getReg());
1074 }
1075
// SVE predicate register with a matching element width suffix;
// ElementWidth == 0 means "no suffix required".
1076 template <int ElementWidth, unsigned Class>
1077 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1078 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1079 return DiagnosticPredicateTy::NoMatch;
1080
1081 if (isSVEVectorReg<Class>() &&
1082 (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1083 return DiagnosticPredicateTy::Match;
1084
1085 return DiagnosticPredicateTy::NearMatch;
1086 }
1087
// SVE data (Z) register counterpart of the predicate check above.
1088 template <int ElementWidth, unsigned Class>
1089 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1090 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1091 return DiagnosticPredicateTy::NoMatch;
1092
1093 if (isSVEVectorReg<Class>() &&
1094 (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1095 return DiagnosticPredicateTy::Match;
1096
1097 return DiagnosticPredicateTy::NearMatch;
1098 }
1099
// SVE data register whose attached shift/extend matches the addressing
// mode's expected scaling (ShiftWidth bits => log2(ShiftWidth/8) shift).
1100 template <int ElementWidth, unsigned Class,
1101 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1102 bool ShiftWidthAlwaysSame>
1103 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1104 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1105 if (!VectorMatch.isMatch())
1106 return DiagnosticPredicateTy::NoMatch;
1107
1108 // Give a more specific diagnostic when the user has explicitly typed in
1109 // a shift-amount that does not match what is expected, but for which
1110 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1111 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1112 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1113 ShiftExtendTy == AArch64_AM::SXTW) &&
1114 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1115 return DiagnosticPredicateTy::NoMatch;
1116
1117 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1118 return DiagnosticPredicateTy::Match;
1119
1120 return DiagnosticPredicateTy::NearMatch;
1121 }
1122
// 64-bit GPR written where a 32-bit one is encoded (and vice versa below);
// the conversion happens in the corresponding add*Operands method.
1123 bool isGPR32as64() const {
1124 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1125 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1126 }
1127
1128 bool isGPR64as32() const {
1129 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1130 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1131 }
1132
// Even/odd register pairs used by CASP and friends.
1133 bool isWSeqPair() const {
1134 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1135 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1136 Reg.RegNum);
1137 }
1138
1139 bool isXSeqPair() const {
1140 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1141 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1142 Reg.RegNum);
1143 }
1144
1145 template<int64_t Angle, int64_t Remainder>
1146 DiagnosticPredicate isComplexRotation() const {
1147 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1148
1149 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1150 if (!CE) return DiagnosticPredicateTy::NoMatch;
1151 uint64_t Value = CE->getValue();
1152
1153 if (Value % Angle == Remainder && Value <= 270)
1154 return DiagnosticPredicateTy::Match;
1155 return DiagnosticPredicateTy::NearMatch;
1156 }
1157
// Scalar 64-bit GPR in the given register class.
1158 template <unsigned RegClassID> bool isGPR64() const {
1159 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1160 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1161 }
1162
// As above, but additionally requiring an "lsl #log2(ExtWidth/8)" scaling
// (used for scaled register offsets).
1163 template <unsigned RegClassID, int ExtWidth>
1164 DiagnosticPredicate isGPR64WithShiftExtend() const {
1165 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1166 return DiagnosticPredicateTy::NoMatch;
1167
1168 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1169 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1170 return DiagnosticPredicateTy::Match;
1171 return DiagnosticPredicateTy::NearMatch;
1172 }
1173
1174 /// Is this a vector list with the type implicit (presumably attached to the
1175 /// instruction itself)?
1176 template <RegKind VectorKind, unsigned NumRegs>
1177 bool isImplicitlyTypedVectorList() const {
// NumElements == 0 is the marker for "no element-type suffix written".
1178 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1179 VectorList.NumElements == 0 &&
1180 VectorList.RegisterKind == VectorKind;
1181 }
1182
1183 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1184 unsigned ElementWidth>
1185 bool isTypedVectorList() const {
1186 if (Kind != k_VectorList)
1187 return false;
1188 if (VectorList.Count != NumRegs)
1189 return false;
1190 if (VectorList.RegisterKind != VectorKind)
1191 return false;
1192 if (VectorList.ElementWidth != ElementWidth)
1193 return false;
1194 return VectorList.NumElements == NumElements;
1195 }
1196
1197 template <int Min, int Max>
1198 DiagnosticPredicate isVectorIndex() const {
1199 if (Kind != k_VectorIndex)
1200 return DiagnosticPredicateTy::NoMatch;
1201 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1202 return DiagnosticPredicateTy::Match;
1203 return DiagnosticPredicateTy::NearMatch;
1204 }
1205
1206 bool isToken() const override { return Kind == k_Token; }
1207
// Token comparison helper used by the custom matchers.
1208 bool isTokenEqual(StringRef Str) const {
1209 return Kind == k_Token && getToken() == Str;
1210 }
1211 bool isSysCR() const { return Kind == k_SysCR; }
1212 bool isPrefetch() const { return Kind == k_Prefetch; }
1213 bool isPSBHint() const { return Kind == k_PSBHint; }
1214 bool isBTIHint() const { return Kind == k_BTIHint; }
1215 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1216 bool isShifter() const {
1217 if (!isShiftExtend())
1218 return false;
1219
1220 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1221 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1222 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1223 ST == AArch64_AM::MSL);
1224 }
1225
1226 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1227 if (Kind != k_FPImm)
1228 return DiagnosticPredicateTy::NoMatch;
1229
1230 if (getFPImmIsExact()) {
1231 // Lookup the immediate from table of supported immediates.
1232 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1233 assert(Desc && "Unknown enum value")((Desc && "Unknown enum value") ? static_cast<void
> (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1233, __PRETTY_FUNCTION__))
;
1234
1235 // Calculate its FP value.
1236 APFloat RealVal(APFloat::IEEEdouble());
1237 if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
1238 APFloat::opOK)
1239 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1239)
;
1240
1241 if (getFPImm().bitwiseIsEqual(RealVal))
1242 return DiagnosticPredicateTy::Match;
1243 }
1244
1245 return DiagnosticPredicateTy::NearMatch;
1246 }
1247
1248 template <unsigned ImmA, unsigned ImmB>
1249 DiagnosticPredicate isExactFPImm() const {
1250 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1251 if ((Res = isExactFPImm<ImmA>()))
1252 return DiagnosticPredicateTy::Match;
1253 if ((Res = isExactFPImm<ImmB>()))
1254 return DiagnosticPredicateTy::Match;
1255 return Res;
1256 }
1257
// Extend operand for extended-register forms: any of the UXT*/SXT* extends
// (LSL is accepted as an alias of UXTX/UXTW) with an amount of at most 4.
1258 bool isExtend() const {
1259 if (!isShiftExtend())
1260 return false;
1261
1262 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1263 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1264 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1265 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1266 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1267 ET == AArch64_AM::LSL) &&
1268 getShiftExtendAmount() <= 4;
1269 }
1270
1271 bool isExtend64() const {
1272 if (!isExtend())
1273 return false;
1274 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1275 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1276 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1277 }
1278
// Extend valid for a 64-bit source register: UXTX/SXTX or plain LSL.
1279 bool isExtendLSL64() const {
1280 if (!isExtend())
1281 return false;
1282 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1283 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1284 ET == AArch64_AM::LSL) &&
1285 getShiftExtendAmount() <= 4;
1286 }
1287
// Register-offset memory extend with a 64-bit index: LSL or SXTX, scaled
// by the access width (log2(Width/8)) or unscaled (#0).
1288 template<int Width> bool isMemXExtend() const {
1289 if (!isExtend())
1290 return false;
1291 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1292 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1293 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1294 getShiftExtendAmount() == 0);
1295 }
1296
// As above with a 32-bit index register: UXTW or SXTW.
1297 template<int Width> bool isMemWExtend() const {
1298 if (!isExtend())
1299 return false;
1300 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1301 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1302 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1303 getShiftExtendAmount() == 0);
1304 }
1305
1306 template <unsigned width>
1307 bool isArithmeticShifter() const {
1308 if (!isShifter())
1309 return false;
1310
1311 // An arithmetic shifter is LSL, LSR, or ASR.
1312 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1313 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1314 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1315 }
1316
1317 template <unsigned width>
1318 bool isLogicalShifter() const {
1319 if (!isShifter())
1320 return false;
1321
1322 // A logical shifter is LSL, LSR, ASR or ROR.
1323 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1324 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1325 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1326 getShiftExtendAmount() < width;
1327 }
1328
1329 bool isMovImm32Shifter() const {
1330 if (!isShifter())
1331 return false;
1332
1333 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1334 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1335 if (ST != AArch64_AM::LSL)
1336 return false;
1337 uint64_t Val = getShiftExtendAmount();
1338 return (Val == 0 || Val == 16);
1339 }
1340
1341 bool isMovImm64Shifter() const {
1342 if (!isShifter())
1343 return false;
1344
1345 // A MOVi shifter is LSL of 0 or 16.
1346 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1347 if (ST != AArch64_AM::LSL)
1348 return false;
1349 uint64_t Val = getShiftExtendAmount();
1350 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1351 }
1352
1353 bool isLogicalVecShifter() const {
1354 if (!isShifter())
1355 return false;
1356
1357 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1358 unsigned Shift = getShiftExtendAmount();
1359 return getShiftExtendType() == AArch64_AM::LSL &&
1360 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1361 }
1362
1363 bool isLogicalVecHalfWordShifter() const {
1364 if (!isLogicalVecShifter())
1365 return false;
1366
1367 // A logical vector shifter is a left shift by 0 or 8.
1368 unsigned Shift = getShiftExtendAmount();
1369 return getShiftExtendType() == AArch64_AM::LSL &&
1370 (Shift == 0 || Shift == 8);
1371 }
1372
1373 bool isMoveVecShifter() const {
1374 if (!isShiftExtend())
1375 return false;
1376
1377 // A logical vector shifter is a left shift by 8 or 16.
1378 unsigned Shift = getShiftExtendAmount();
1379 return getShiftExtendType() == AArch64_AM::MSL &&
1380 (Shift == 8 || Shift == 16);
1381 }
1382
1383 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1384 // to LDUR/STUR when the offset is not legal for the former but is for
1385 // the latter. As such, in addition to checking for being a legal unscaled
1386 // address, also check that it is not a legal scaled address. This avoids
1387 // ambiguity in the matcher.
1388 template<int Width>
1389 bool isSImm9OffsetFB() const {
1390 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1391 }
1392
1393 bool isAdrpLabel() const {
1394 // Validation was handled during parsing, so we just sanity check that
1395 // something didn't go haywire.
1396 if (!isImm())
1397 return false;
1398
1399 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1400 int64_t Val = CE->getValue();
1401 int64_t Min = - (4096 * (1LL << (21 - 1)));
1402 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1403 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1404 }
1405
1406 return true;
1407 }
1408
1409 bool isAdrLabel() const {
1410 // Validation was handled during parsing, so we just sanity check that
1411 // something didn't go haywire.
1412 if (!isImm())
1413 return false;
1414
1415 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1416 int64_t Val = CE->getValue();
1417 int64_t Min = - (1LL << (21 - 1));
1418 int64_t Max = ((1LL << (21 - 1)) - 1);
1419 return Val >= Min && Val <= Max;
1420 }
1421
1422 return true;
1423 }
1424
1425 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1426 // Add as immediates when possible. Null MCExpr = 0.
1427 if (!Expr)
1428 Inst.addOperand(MCOperand::createImm(0));
1429 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1430 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1431 else
1432 Inst.addOperand(MCOperand::createExpr(Expr));
1433 }
1434
1435 void addRegOperands(MCInst &Inst, unsigned N) const {
1436 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1436, __PRETTY_FUNCTION__))
;
1437 Inst.addOperand(MCOperand::createReg(getReg()));
1438 }
1439
1440 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1441 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1441, __PRETTY_FUNCTION__))
;
1442 assert(((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1443, __PRETTY_FUNCTION__))
1443 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1443, __PRETTY_FUNCTION__))
;
1444
1445 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1446 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1447 RI->getEncodingValue(getReg()));
1448
1449 Inst.addOperand(MCOperand::createReg(Reg));
1450 }
1451
1452 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1453 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1453, __PRETTY_FUNCTION__))
;
1454 assert(((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1455, __PRETTY_FUNCTION__))
1455 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1455, __PRETTY_FUNCTION__))
;
1456
1457 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1458 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1459 RI->getEncodingValue(getReg()));
1460
1461 Inst.addOperand(MCOperand::createReg(Reg));
1462 }
1463
1464 template <int Width>
1465 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1466 unsigned Base;
1467 switch (Width) {
1468 case 8: Base = AArch64::B0; break;
1469 case 16: Base = AArch64::H0; break;
1470 case 32: Base = AArch64::S0; break;
1471 case 64: Base = AArch64::D0; break;
1472 case 128: Base = AArch64::Q0; break;
1473 default:
1474 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1474)
;
1475 }
1476 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1477 }
1478
1479 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1480 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1480, __PRETTY_FUNCTION__))
;
1481 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1482, __PRETTY_FUNCTION__))
1482 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1482, __PRETTY_FUNCTION__))
;
1483 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1484 }
1485
1486 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1487 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1487, __PRETTY_FUNCTION__))
;
1488 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1489, __PRETTY_FUNCTION__))
1489 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1489, __PRETTY_FUNCTION__))
;
1490 Inst.addOperand(MCOperand::createReg(getReg()));
1491 }
1492
1493 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1494 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1494, __PRETTY_FUNCTION__))
;
1495 Inst.addOperand(MCOperand::createReg(getReg()));
1496 }
1497
1498 enum VecListIndexType {
1499 VecListIdx_DReg = 0,
1500 VecListIdx_QReg = 1,
1501 VecListIdx_ZReg = 2,
1502 };
1503
1504 template <VecListIndexType RegTy, unsigned NumRegs>
1505 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1506 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1506, __PRETTY_FUNCTION__))
;
1507 static const unsigned FirstRegs[][5] = {
1508 /* DReg */ { AArch64::Q0,
1509 AArch64::D0, AArch64::D0_D1,
1510 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1511 /* QReg */ { AArch64::Q0,
1512 AArch64::Q0, AArch64::Q0_Q1,
1513 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1514 /* ZReg */ { AArch64::Z0,
1515 AArch64::Z0, AArch64::Z0_Z1,
1516 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1517 };
1518
1519 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1520, __PRETTY_FUNCTION__))
1520 " NumRegs must be <= 4 for ZRegs")(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1520, __PRETTY_FUNCTION__))
;
1521
1522 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1523 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1524 FirstRegs[(unsigned)RegTy][0]));
1525 }
1526
1527 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1528 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1528, __PRETTY_FUNCTION__))
;
1529 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1530 }
1531
1532 template <unsigned ImmIs0, unsigned ImmIs1>
1533 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1534 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1534, __PRETTY_FUNCTION__))
;
1535 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")((bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand"
) ? static_cast<void> (0) : __assert_fail ("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1535, __PRETTY_FUNCTION__))
;
1536 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1537 }
1538
1539 void addImmOperands(MCInst &Inst, unsigned N) const {
1540 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1540, __PRETTY_FUNCTION__))
;
1541 // If this is a pageoff symrefexpr with an addend, adjust the addend
1542 // to be only the page-offset portion. Otherwise, just add the expr
1543 // as-is.
1544 addExpr(Inst, getImm());
1545 }
1546
1547 template <int Shift>
1548 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1549 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1549, __PRETTY_FUNCTION__))
;
1550 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1551 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1552 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1553 } else if (isShiftedImm()) {
1554 addExpr(Inst, getShiftedImmVal());
1555 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1556 } else {
1557 addExpr(Inst, getImm());
1558 Inst.addOperand(MCOperand::createImm(0));
1559 }
1560 }
1561
1562 template <int Shift>
1563 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1564 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1564, __PRETTY_FUNCTION__))
;
1565 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1566 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1567 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1568 } else
1569 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1569)
;
1570 }
1571
1572 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1573 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1573, __PRETTY_FUNCTION__))
;
1574 Inst.addOperand(MCOperand::createImm(getCondCode()));
1575 }
1576
1577 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1578 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1578, __PRETTY_FUNCTION__))
;
1579 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1580 if (!MCE)
1581 addExpr(Inst, getImm());
1582 else
1583 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1584 }
1585
1586 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1587 addImmOperands(Inst, N);
1588 }
1589
1590 template<int Scale>
1591 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1592 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1592, __PRETTY_FUNCTION__))
;
1593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1594
1595 if (!MCE) {
1596 Inst.addOperand(MCOperand::createExpr(getImm()));
1597 return;
1598 }
1599 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1600 }
1601
1602 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1603 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1603, __PRETTY_FUNCTION__))
;
1604 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1605 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1606 }
1607
1608 template <int Scale>
1609 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1610 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1610, __PRETTY_FUNCTION__))
;
1611 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1612 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1613 }
1614
1615 template <typename T>
1616 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1617 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1617, __PRETTY_FUNCTION__))
;
1618 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1619 typename std::make_unsigned<T>::type Val = MCE->getValue();
1620 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1621 Inst.addOperand(MCOperand::createImm(encoding));
1622 }
1623
1624 template <typename T>
1625 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1626 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1626, __PRETTY_FUNCTION__))
;
1627 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1628 typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1629 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1630 Inst.addOperand(MCOperand::createImm(encoding));
1631 }
1632
1633 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1634 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1634, __PRETTY_FUNCTION__))
;
1635 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1636 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1637 Inst.addOperand(MCOperand::createImm(encoding));
1638 }
1639
1640 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1641 // Branch operands don't encode the low bits, so shift them off
1642 // here. If it's a label, however, just put it on directly as there's
1643 // not enough information now to do anything.
1644 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1644, __PRETTY_FUNCTION__))
;
1645 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1646 if (!MCE) {
1647 addExpr(Inst, getImm());
1648 return;
1649 }
1650 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1650, __PRETTY_FUNCTION__))
;
1651 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1652 }
1653
1654 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1655 // Branch operands don't encode the low bits, so shift them off
1656 // here. If it's a label, however, just put it on directly as there's
1657 // not enough information now to do anything.
1658 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1658, __PRETTY_FUNCTION__))
;
1659 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1660 if (!MCE) {
1661 addExpr(Inst, getImm());
1662 return;
1663 }
1664 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1664, __PRETTY_FUNCTION__))
;
1665 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1666 }
1667
1668 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1669 // Branch operands don't encode the low bits, so shift them off
1670 // here. If it's a label, however, just put it on directly as there's
1671 // not enough information now to do anything.
1672 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1672, __PRETTY_FUNCTION__))
;
1673 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1674 if (!MCE) {
1675 addExpr(Inst, getImm());
1676 return;
1677 }
1678 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1678, __PRETTY_FUNCTION__))
;
1679 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1680 }
1681
1682 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1683 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1683, __PRETTY_FUNCTION__))
;
1684 Inst.addOperand(MCOperand::createImm(
1685 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1686 }
1687
1688 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1689 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1689, __PRETTY_FUNCTION__))
;
1690 Inst.addOperand(MCOperand::createImm(getBarrier()));
1691 }
1692
1693 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1694 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1694, __PRETTY_FUNCTION__))
;
1695
1696 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1697 }
1698
1699 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1700 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1700, __PRETTY_FUNCTION__))
;
1701
1702 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1703 }
1704
1705 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1706 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1706, __PRETTY_FUNCTION__))
;
1707
1708 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1709 }
1710
1711 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1712 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1712, __PRETTY_FUNCTION__))
;
1713
1714 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1715 }
1716
1717 void addSysCROperands(MCInst &Inst, unsigned N) const {
1718 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1718, __PRETTY_FUNCTION__))
;
1719 Inst.addOperand(MCOperand::createImm(getSysCR()));
1720 }
1721
1722 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1723 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1723, __PRETTY_FUNCTION__))
;
1724 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1725 }
1726
1727 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1728 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1728, __PRETTY_FUNCTION__))
;
1729 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1730 }
1731
1732 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1733 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1733, __PRETTY_FUNCTION__))
;
1734 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1735 }
1736
1737 void addShifterOperands(MCInst &Inst, unsigned N) const {
1738 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1738, __PRETTY_FUNCTION__))
;
1739 unsigned Imm =
1740 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1741 Inst.addOperand(MCOperand::createImm(Imm));
1742 }
1743
1744 void addExtendOperands(MCInst &Inst, unsigned N) const {
1745 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1745, __PRETTY_FUNCTION__))
;
1746 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1747 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1748 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1749 Inst.addOperand(MCOperand::createImm(Imm));
1750 }
1751
1752 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1753 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1753, __PRETTY_FUNCTION__))
;
1754 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1755 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1756 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1757 Inst.addOperand(MCOperand::createImm(Imm));
1758 }
1759
1760 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1761 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1761, __PRETTY_FUNCTION__))
;
1762 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1763 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1764 Inst.addOperand(MCOperand::createImm(IsSigned));
1765 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1766 }
1767
1768 // For 8-bit load/store instructions with a register offset, both the
1769 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1770 // they're disambiguated by whether the shift was explicit or implicit rather
1771 // than its size.
1772 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1773 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1773, __PRETTY_FUNCTION__))
;
1774 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1775 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1776 Inst.addOperand(MCOperand::createImm(IsSigned));
1777 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1778 }
1779
1780 template<int Shift>
1781 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1782 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1782, __PRETTY_FUNCTION__))
;
1783
1784 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1785 uint64_t Value = CE->getValue();
1786 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1787 }
1788
1789 template<int Shift>
1790 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1791 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1791, __PRETTY_FUNCTION__))
;
1792
1793 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1794 uint64_t Value = CE->getValue();
1795 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1796 }
1797
1798 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1799 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1799, __PRETTY_FUNCTION__))
;
1800 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1801 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1802 }
1803
1804 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1805 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1805, __PRETTY_FUNCTION__))
;
1806 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1807 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1808 }
1809
1810 void print(raw_ostream &OS) const override;
1811
1812 static std::unique_ptr<AArch64Operand>
1813 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1814 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1815 Op->Tok.Data = Str.data();
1816 Op->Tok.Length = Str.size();
1817 Op->Tok.IsSuffix = IsSuffix;
1818 Op->StartLoc = S;
1819 Op->EndLoc = S;
1820 return Op;
1821 }
1822
1823 static std::unique_ptr<AArch64Operand>
1824 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1825 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1826 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1827 unsigned ShiftAmount = 0,
1828 unsigned HasExplicitAmount = false) {
1829 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1830 Op->Reg.RegNum = RegNum;
1831 Op->Reg.Kind = Kind;
1832 Op->Reg.ElementWidth = 0;
1833 Op->Reg.EqualityTy = EqTy;
1834 Op->Reg.ShiftExtend.Type = ExtTy;
1835 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1836 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1837 Op->StartLoc = S;
1838 Op->EndLoc = E;
1839 return Op;
1840 }
1841
1842 static std::unique_ptr<AArch64Operand>
1843 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1844 SMLoc S, SMLoc E, MCContext &Ctx,
1845 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1846 unsigned ShiftAmount = 0,
1847 unsigned HasExplicitAmount = false) {
1848 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1850, __PRETTY_FUNCTION__))
1849 Kind == RegKind::SVEPredicateVector) &&(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1850, __PRETTY_FUNCTION__))
1850 "Invalid vector kind")(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1850, __PRETTY_FUNCTION__))
;
1851 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1852 HasExplicitAmount);
1853 Op->Reg.ElementWidth = ElementWidth;
1854 return Op;
1855 }
1856
1857 static std::unique_ptr<AArch64Operand>
1858 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1859 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1860 MCContext &Ctx) {
1861 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1862 Op->VectorList.RegNum = RegNum;
1863 Op->VectorList.Count = Count;
1864 Op->VectorList.NumElements = NumElements;
1865 Op->VectorList.ElementWidth = ElementWidth;
1866 Op->VectorList.RegisterKind = RegisterKind;
1867 Op->StartLoc = S;
1868 Op->EndLoc = E;
1869 return Op;
1870 }
1871
1872 static std::unique_ptr<AArch64Operand>
1873 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1874 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1875 Op->VectorIndex.Val = Idx;
1876 Op->StartLoc = S;
1877 Op->EndLoc = E;
1878 return Op;
1879 }
1880
1881 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1882 SMLoc E, MCContext &Ctx) {
1883 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1884 Op->Imm.Val = Val;
1885 Op->StartLoc = S;
1886 Op->EndLoc = E;
1887 return Op;
1888 }
1889
1890 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1891 unsigned ShiftAmount,
1892 SMLoc S, SMLoc E,
1893 MCContext &Ctx) {
1894 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1895 Op->ShiftedImm .Val = Val;
1896 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1897 Op->StartLoc = S;
1898 Op->EndLoc = E;
1899 return Op;
1900 }
1901
1902 static std::unique_ptr<AArch64Operand>
1903 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1904 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1905 Op->CondCode.Code = Code;
1906 Op->StartLoc = S;
1907 Op->EndLoc = E;
1908 return Op;
1909 }
1910
1911 static std::unique_ptr<AArch64Operand>
1912 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1913 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1914 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1915 Op->FPImm.IsExact = IsExact;
1916 Op->StartLoc = S;
1917 Op->EndLoc = S;
1918 return Op;
1919 }
1920
1921 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1922 StringRef Str,
1923 SMLoc S,
1924 MCContext &Ctx) {
1925 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1926 Op->Barrier.Val = Val;
1927 Op->Barrier.Data = Str.data();
1928 Op->Barrier.Length = Str.size();
1929 Op->StartLoc = S;
1930 Op->EndLoc = S;
1931 return Op;
1932 }
1933
1934 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1935 uint32_t MRSReg,
1936 uint32_t MSRReg,
1937 uint32_t PStateField,
1938 MCContext &Ctx) {
1939 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1940 Op->SysReg.Data = Str.data();
1941 Op->SysReg.Length = Str.size();
1942 Op->SysReg.MRSReg = MRSReg;
1943 Op->SysReg.MSRReg = MSRReg;
1944 Op->SysReg.PStateField = PStateField;
1945 Op->StartLoc = S;
1946 Op->EndLoc = S;
1947 return Op;
1948 }
1949
1950 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1951 SMLoc E, MCContext &Ctx) {
1952 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1953 Op->SysCRImm.Val = Val;
1954 Op->StartLoc = S;
1955 Op->EndLoc = E;
1956 return Op;
1957 }
1958
1959 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1960 StringRef Str,
1961 SMLoc S,
1962 MCContext &Ctx) {
1963 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1964 Op->Prefetch.Val = Val;
1965 Op->Barrier.Data = Str.data();
1966 Op->Barrier.Length = Str.size();
1967 Op->StartLoc = S;
1968 Op->EndLoc = S;
1969 return Op;
1970 }
1971
1972 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1973 StringRef Str,
1974 SMLoc S,
1975 MCContext &Ctx) {
1976 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1977 Op->PSBHint.Val = Val;
1978 Op->PSBHint.Data = Str.data();
1979 Op->PSBHint.Length = Str.size();
1980 Op->StartLoc = S;
1981 Op->EndLoc = S;
1982 return Op;
1983 }
1984
1985 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1986 StringRef Str,
1987 SMLoc S,
1988 MCContext &Ctx) {
1989 auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
1990 Op->BTIHint.Val = Val << 1 | 32;
1991 Op->BTIHint.Data = Str.data();
1992 Op->BTIHint.Length = Str.size();
1993 Op->StartLoc = S;
1994 Op->EndLoc = S;
1995 return Op;
1996 }
1997
1998 static std::unique_ptr<AArch64Operand>
1999 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2000 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2001 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2002 Op->ShiftExtend.Type = ShOp;
2003 Op->ShiftExtend.Amount = Val;
2004 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2005 Op->StartLoc = S;
2006 Op->EndLoc = E;
2007 return Op;
2008 }
2009};
2010
2011} // end anonymous namespace.
2012
2013void AArch64Operand::print(raw_ostream &OS) const {
2014 switch (Kind) {
2015 case k_FPImm:
2016 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2017 if (!getFPImmIsExact())
2018 OS << " (inexact)";
2019 OS << ">";
2020 break;
2021 case k_Barrier: {
2022 StringRef Name = getBarrierName();
2023 if (!Name.empty())
2024 OS << "<barrier " << Name << ">";
2025 else
2026 OS << "<barrier invalid #" << getBarrier() << ">";
2027 break;
2028 }
2029 case k_Immediate:
2030 OS << *getImm();
2031 break;
2032 case k_ShiftedImm: {
2033 unsigned Shift = getShiftedImmShift();
2034 OS << "<shiftedimm ";
2035 OS << *getShiftedImmVal();
2036 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2037 break;
2038 }
2039 case k_CondCode:
2040 OS << "<condcode " << getCondCode() << ">";
2041 break;
2042 case k_VectorList: {
2043 OS << "<vectorlist ";
2044 unsigned Reg = getVectorListStart();
2045 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2046 OS << Reg + i << " ";
2047 OS << ">";
2048 break;
2049 }
2050 case k_VectorIndex:
2051 OS << "<vectorindex " << getVectorIndex() << ">";
2052 break;
2053 case k_SysReg:
2054 OS << "<sysreg: " << getSysReg() << '>';
2055 break;
2056 case k_Token:
2057 OS << "'" << getToken() << "'";
2058 break;
2059 case k_SysCR:
2060 OS << "c" << getSysCR();
2061 break;
2062 case k_Prefetch: {
2063 StringRef Name = getPrefetchName();
2064 if (!Name.empty())
2065 OS << "<prfop " << Name << ">";
2066 else
2067 OS << "<prfop invalid #" << getPrefetch() << ">";
2068 break;
2069 }
2070 case k_PSBHint:
2071 OS << getPSBHintName();
2072 break;
2073 case k_Register:
2074 OS << "<register " << getReg() << ">";
2075 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2076 break;
2077 LLVM_FALLTHROUGH[[clang::fallthrough]];
2078 case k_BTIHint:
2079 OS << getBTIHintName();
2080 break;
2081 case k_ShiftExtend:
2082 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2083 << getShiftExtendAmount();
2084 if (!hasShiftExtendAmount())
2085 OS << "<imp>";
2086 OS << '>';
2087 break;
2088 }
2089}
2090
2091/// @name Auto-generated Match Functions
2092/// {
2093
2094static unsigned MatchRegisterName(StringRef Name);
2095
2096/// }
2097
/// Map a NEON vector register name ("v0".."v31", compared case-insensitively)
/// to the corresponding Q-register number, or 0 if Name is not a NEON vector
/// register. Note the exact-match semantics: forms like "v01" do not match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2134
2135/// Returns an optional pair of (#elements, element-width) if Suffix
2136/// is a valid vector kind. Where the number of elements in a vector
2137/// or the vector width is implicit or explicitly unknown (but still a
2138/// valid suffix kind), 0 is used.
2139static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2140 RegKind VectorKind) {
2141 std::pair<int, int> Res = {-1, -1};
2142
2143 switch (VectorKind) {
2144 case RegKind::NeonVector:
2145 Res =
2146 StringSwitch<std::pair<int, int>>(Suffix.lower())
2147 .Case("", {0, 0})
2148 .Case(".1d", {1, 64})
2149 .Case(".1q", {1, 128})
2150 // '.2h' needed for fp16 scalar pairwise reductions
2151 .Case(".2h", {2, 16})
2152 .Case(".2s", {2, 32})
2153 .Case(".2d", {2, 64})
2154 // '.4b' is another special case for the ARMv8.2a dot product
2155 // operand
2156 .Case(".4b", {4, 8})
2157 .Case(".4h", {4, 16})
2158 .Case(".4s", {4, 32})
2159 .Case(".8b", {8, 8})
2160 .Case(".8h", {8, 16})
2161 .Case(".16b", {16, 8})
2162 // Accept the width neutral ones, too, for verbose syntax. If those
2163 // aren't used in the right places, the token operand won't match so
2164 // all will work out.
2165 .Case(".b", {0, 8})
2166 .Case(".h", {0, 16})
2167 .Case(".s", {0, 32})
2168 .Case(".d", {0, 64})
2169 .Default({-1, -1});
2170 break;
2171 case RegKind::SVEPredicateVector:
2172 case RegKind::SVEDataVector:
2173 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2174 .Case("", {0, 0})
2175 .Case(".b", {0, 8})
2176 .Case(".h", {0, 16})
2177 .Case(".s", {0, 32})
2178 .Case(".d", {0, 64})
2179 .Case(".q", {0, 128})
2180 .Default({-1, -1});
2181 break;
2182 default:
2183 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2183)
;
2184 }
2185
2186 if (Res == std::make_pair(-1, -1))
2187 return Optional<std::pair<int, int>>();
2188
2189 return Optional<std::pair<int, int>>(Res);
2190}
2191
2192static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2193 return parseVectorKind(Suffix, VectorKind).hasValue();
2194}
2195
/// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
/// the corresponding Z-register number, or 0 if Name does not match.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2232
/// Map an SVE predicate register name ("p0".."p15", case-insensitive) to the
/// corresponding P-register number, or 0 if Name does not match.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2253
2254bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2255 SMLoc &EndLoc) {
2256 StartLoc = getLoc();
2257 auto Res = tryParseScalarRegister(RegNo);
2258 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2259 return Res != MatchOperand_Success;
2260}
2261
2262// Matches a register name or register alias previously defined by '.req'
2263unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2264 RegKind Kind) {
2265 unsigned RegNum = 0;
2266 if ((RegNum = matchSVEDataVectorRegName(Name)))
2267 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2268
2269 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2270 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2271
2272 if ((RegNum = MatchNeonVectorRegName(Name)))
2273 return Kind == RegKind::NeonVector ? RegNum : 0;
2274
2275 // The parsed register must be of RegKind Scalar
2276 if ((RegNum = MatchRegisterName(Name)))
2277 return Kind == RegKind::Scalar ? RegNum : 0;
2278
2279 if (!RegNum) {
2280 // Handle a few common aliases of registers.
2281 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2282 .Case("fp", AArch64::FP)
2283 .Case("lr", AArch64::LR)
2284 .Case("x31", AArch64::XZR)
2285 .Case("w31", AArch64::WZR)
2286 .Default(0))
2287 return Kind == RegKind::Scalar ? RegNum : 0;
2288
2289 // Check for aliases registered via .req. Canonicalize to lower case.
2290 // That's more consistent since register names are case insensitive, and
2291 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2292 auto Entry = RegisterReqs.find(Name.lower());
2293 if (Entry == RegisterReqs.end())
2294 return 0;
2295
2296 // set RegNum if the match is the right kind of register
2297 if (Kind == Entry->getValue().first)
2298 RegNum = Entry->getValue().second;
2299 }
2300 return RegNum;
2301}
2302
2303/// tryParseScalarRegister - Try to parse a register name. The token must be an
2304/// Identifier when called, and if it is a register name the token is eaten and
2305/// the register is added to the operand list.
2306OperandMatchResultTy
2307AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2308 MCAsmParser &Parser = getParser();
2309 const AsmToken &Tok = Parser.getTok();
2310 if (Tok.isNot(AsmToken::Identifier))
2311 return MatchOperand_NoMatch;
2312
2313 std::string lowerCase = Tok.getString().lower();
2314 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2315 if (Reg == 0)
2316 return MatchOperand_NoMatch;
2317
2318 RegNum = Reg;
2319 Parser.Lex(); // Eat identifier token.
2320 return MatchOperand_Success;
2321}
2322
2323/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2324OperandMatchResultTy
2325AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2326 MCAsmParser &Parser = getParser();
2327 SMLoc S = getLoc();
2328
2329 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2330 Error(S, "Expected cN operand where 0 <= N <= 15");
2331 return MatchOperand_ParseFail;
2332 }
2333
2334 StringRef Tok = Parser.getTok().getIdentifier();
2335 if (Tok[0] != 'c' && Tok[0] != 'C') {
2336 Error(S, "Expected cN operand where 0 <= N <= 15");
2337 return MatchOperand_ParseFail;
2338 }
2339
2340 uint32_t CRNum;
2341 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2342 if (BadNum || CRNum > 15) {
2343 Error(S, "Expected cN operand where 0 <= N <= 15");
2344 return MatchOperand_ParseFail;
2345 }
2346
2347 Parser.Lex(); // Eat identifier token.
2348 Operands.push_back(
2349 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2350 return MatchOperand_Success;
2351}
2352
/// tryParsePrefetch - Try to parse a prefetch operand.
///
/// Accepts either a named hint (looked up in the PRFM or SVE-PRFM table
/// depending on the IsSVEPrefetch template parameter) or an immediate in
/// [0, MaxVal]. For immediates, the table is consulted in reverse so a
/// known encoding still records its canonical name on the operand.
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Name -> encoding, using the table selected by IsSVEPrefetch.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Encoding -> canonical name, for immediates that match a known hint.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  // SVE prfops are 4 bits, base-ISA prfops are 5 bits.
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash. NOTE: parseOptionalToken has a side effect (it
  // consumes the '#'), so the short-circuit order here matters.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Attach the canonical name when the encoding is a known hint.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  return MatchOperand_Success;
}
2422
2423/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2424OperandMatchResultTy
2425AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2426 MCAsmParser &Parser = getParser();
2427 SMLoc S = getLoc();
2428 const AsmToken &Tok = Parser.getTok();
2429 if (Tok.isNot(AsmToken::Identifier)) {
2430 TokError("invalid operand for instruction");
2431 return MatchOperand_ParseFail;
2432 }
2433
2434 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2435 if (!PSB) {
2436 TokError("invalid operand for instruction");
2437 return MatchOperand_ParseFail;
2438 }
2439
2440 Parser.Lex(); // Eat identifier token.
2441 Operands.push_back(AArch64Operand::CreatePSBHint(
2442 PSB->Encoding, Tok.getString(), S, getContext()));
2443 return MatchOperand_Success;
2444}
2445
2446/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2447OperandMatchResultTy
2448AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2449 MCAsmParser &Parser = getParser();
2450 SMLoc S = getLoc();
2451 const AsmToken &Tok = Parser.getTok();
2452 if (Tok.isNot(AsmToken::Identifier)) {
2453 TokError("invalid operand for instruction");
2454 return MatchOperand_ParseFail;
2455 }
2456
2457 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2458 if (!BTI) {
2459 TokError("invalid operand for instruction");
2460 return MatchOperand_ParseFail;
2461 }
2462
2463 Parser.Lex(); // Eat identifier token.
2464 Operands.push_back(AArch64Operand::CreateBTIHint(
2465 BTI->Encoding, Tok.getString(), S, getContext()));
2466 return MatchOperand_Success;
2467}
2468
2469/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2470/// instruction.
2471OperandMatchResultTy
2472AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2473 MCAsmParser &Parser = getParser();
2474 SMLoc S = getLoc();
2475 const MCExpr *Expr;
2476
2477 if (Parser.getTok().is(AsmToken::Hash)) {
2478 Parser.Lex(); // Eat hash token.
2479 }
2480
2481 if (parseSymbolicImmVal(Expr))
2482 return MatchOperand_ParseFail;
2483
2484 AArch64MCExpr::VariantKind ELFRefKind;
2485 MCSymbolRefExpr::VariantKind DarwinRefKind;
2486 int64_t Addend;
2487 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2488 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2489 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2490 // No modifier was specified at all; this is the syntax for an ELF basic
2491 // ADRP relocation (unfortunately).
2492 Expr =
2493 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2494 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2495 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2496 Addend != 0) {
2497 Error(S, "gotpage label reference not allowed an addend");
2498 return MatchOperand_ParseFail;
2499 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2500 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2501 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2502 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2503 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2504 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2505 // The operand must be an @page or @gotpage qualified symbolref.
2506 Error(S, "page or gotpage label reference expected");
2507 return MatchOperand_ParseFail;
2508 }
2509 }
2510
2511 // We have either a label reference possibly with addend or an immediate. The
2512 // addend is a raw value here. The linker will adjust it to only reference the
2513 // page.
2514 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2515 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2516
2517 return MatchOperand_Success;
2518}
2519
2520/// tryParseAdrLabel - Parse and validate a source label for the ADR
2521/// instruction.
2522OperandMatchResultTy
2523AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2524 SMLoc S = getLoc();
2525 const MCExpr *Expr;
1
'Expr' declared without an initial value
2526
2527 // Leave anything with a bracket to the default for SVE
2528 if (getParser().getTok().is(AsmToken::LBrac))
2
Taking false branch
2529 return MatchOperand_NoMatch;
2530
2531 if (getParser().getTok().is(AsmToken::Hash))
3
Taking false branch
2532 getParser().Lex(); // Eat hash token.
2533
2534 if (parseSymbolicImmVal(Expr))
4
Calling 'AArch64AsmParser::parseSymbolicImmVal'
9
Returning from 'AArch64AsmParser::parseSymbolicImmVal'
10
Assuming the condition is false
11
Taking false branch
2535 return MatchOperand_ParseFail;
2536
2537 AArch64MCExpr::VariantKind ELFRefKind;
2538 MCSymbolRefExpr::VariantKind DarwinRefKind;
2539 int64_t Addend;
2540 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
12
1st function call argument is an uninitialized value
2541 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2542 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2543 // No modifier was specified at all; this is the syntax for an ELF basic
2544 // ADR relocation (unfortunately).
2545 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2546 } else {
2547 Error(S, "unexpected adr label");
2548 return MatchOperand_ParseFail;
2549 }
2550 }
2551
2552 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2553 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2554 return MatchOperand_Success;
2555}
2556
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts an optional '#', an optional '-', then either a hex-encoded 8-bit
/// FP immediate (e.g. "#0x70") or a literal real/integer. When
/// AddFPZeroAsLiteral is set, a positive-zero value is pushed as the two
/// tokens "#0" and ".0" instead of an FP immediate operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  // Both of these consume their token if present, so the order of the two
  // calls matters ('#' before '-').
  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this just isn't an FP immediate; with one it is
    // a malformed operand.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // Hex form is the raw 8-bit FPImm encoding; negation is meaningless.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto Status =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      // Status records whether the decimal was representable exactly.
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, Status == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2609
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No shift suffix: plain immediate operand.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  // NOTE(review): 'Imm != 0' compares the MCExpr POINTER against null (it is
  // always non-null after a successful parse), not the immediate's value —
  // so this branch is effectively 'ShiftAmount == 0'. Confirm intent before
  // changing.
  if (ShiftAmount == 0 && Imm != 0) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2673
/// parseCondCodeString - Parse a Condition Code string (case-insensitive).
/// Returns AArch64CC::Invalid if Cond names no known condition. When SVE is
/// available, the SVE condition aliases (which map onto base condition
/// codes, e.g. "none" -> EQ) are also accepted.
AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                               .Case("eq", AArch64CC::EQ)
                               .Case("ne", AArch64CC::NE)
                               .Case("cs", AArch64CC::HS)
                               .Case("hs", AArch64CC::HS)
                               .Case("cc", AArch64CC::LO)
                               .Case("lo", AArch64CC::LO)
                               .Case("mi", AArch64CC::MI)
                               .Case("pl", AArch64CC::PL)
                               .Case("vs", AArch64CC::VS)
                               .Case("vc", AArch64CC::VC)
                               .Case("hi", AArch64CC::HI)
                               .Case("ls", AArch64CC::LS)
                               .Case("ge", AArch64CC::GE)
                               .Case("lt", AArch64CC::LT)
                               .Case("gt", AArch64CC::GT)
                               .Case("le", AArch64CC::LE)
                               .Case("al", AArch64CC::AL)
                               .Case("nv", AArch64CC::NV)
                               .Default(AArch64CC::Invalid);

  // Fall back to the SVE alias spellings only when the base lookup failed
  // and the SVE feature is enabled.
  if (CC == AArch64CC::Invalid &&
      getSTI().getFeatureBits()[AArch64::FeatureSVE])
    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
             .Case("none", AArch64CC::EQ)
             .Case("any", AArch64CC::NE)
             .Case("nlast", AArch64CC::HS)
             .Case("last", AArch64CC::LO)
             .Case("first", AArch64CC::MI)
             .Case("nfrst", AArch64CC::PL)
             .Case("pmore", AArch64CC::HI)
             .Case("plast", AArch64CC::LS)
             .Case("tcont", AArch64CC::GE)
             .Case("tstop", AArch64CC::LT)
             .Default(AArch64CC::Invalid);

  return CC;
}
2714
2715/// parseCondCode - Parse a Condition Code operand.
2716bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2717 bool invertCondCode) {
2718 MCAsmParser &Parser = getParser();
2719 SMLoc S = getLoc();
2720 const AsmToken &Tok = Parser.getTok();
2721 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")((Tok.is(AsmToken::Identifier) && "Token is not an Identifier"
) ? static_cast<void> (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2721, __PRETTY_FUNCTION__))
;
2722
2723 StringRef Cond = Tok.getString();
2724 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2725 if (CC == AArch64CC::Invalid)
2726 return TokError("invalid condition code");
2727 Parser.Lex(); // Eat identifier token.
2728
2729 if (invertCondCode) {
2730 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2731 return TokError("condition codes AL and NV are invalid for this instruction");
2732 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2733 }
2734
2735 Operands.push_back(
2736 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2737 return false;
2738}
2739
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Shift operators (lsl/lsr/asr/ror/msl) require an amount; extend operators
/// (uxtb..sxtx) default to an implicit #0 when none is written.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend)

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  // Consumes the '#' if present.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2813
// Table mapping user-facing architecture-extension names (as used in
// .arch_extension directives and diagnostics) to their feature bits.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predctrl", {AArch64::FeaturePredCtrl}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2840
2841static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2842 if (FBS[AArch64::HasV8_1aOps])
2843 Str += "ARMv8.1a";
2844 else if (FBS[AArch64::HasV8_2aOps])
2845 Str += "ARMv8.2a";
2846 else if (FBS[AArch64::HasV8_3aOps])
2847 Str += "ARMv8.3a";
2848 else if (FBS[AArch64::HasV8_4aOps])
2849 Str += "ARMv8.4a";
2850 else if (FBS[AArch64::HasV8_5aOps])
2851 Str += "ARMv8.5a";
2852 else {
2853 auto ext = std::find_if(std::begin(ExtensionMap),
2854 std::end(ExtensionMap),
2855 [&](const Extension& e)
2856 // Use & in case multiple features are enabled
2857 { return (FBS & e.Features) != FeatureBitset(); }
2858 );
2859
2860 Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2861 }
2862}
2863
2864void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2865 SMLoc S) {
2866 const uint16_t Op2 = Encoding & 7;
2867 const uint16_t Cm = (Encoding & 0x78) >> 3;
2868 const uint16_t Cn = (Encoding & 0x780) >> 7;
2869 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2870
2871 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2872
2873 Operands.push_back(
2874 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2875 Operands.push_back(
2876 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2877 Operands.push_back(
2878 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2879 Expr = MCConstantExpr::create(Op2, getContext());
2880 Operands.push_back(
2881 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2882}
2883
2884/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2885/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2886bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2887 OperandVector &Operands) {
2888 if (Name.find('.') != StringRef::npos)
2889 return TokError("invalid operand");
2890
2891 Mnemonic = Name;
2892 Operands.push_back(
2893 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2894
2895 MCAsmParser &Parser = getParser();
2896 const AsmToken &Tok = Parser.getTok();
2897 StringRef Op = Tok.getString();
2898 SMLoc S = Tok.getLoc();
2899
2900 if (Mnemonic == "ic") {
2901 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2902 if (!IC)
2903 return TokError("invalid operand for IC instruction");
2904 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2905 std::string Str("IC " + std::string(IC->Name) + " requires ");
2906 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2907 return TokError(Str.c_str());
2908 }
2909 createSysAlias(IC->Encoding, Operands, S);
2910 } else if (Mnemonic == "dc") {
2911 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2912 if (!DC)
2913 return TokError("invalid operand for DC instruction");
2914 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2915 std::string Str("DC " + std::string(DC->Name) + " requires ");
2916 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2917 return TokError(Str.c_str());
2918 }
2919 createSysAlias(DC->Encoding, Operands, S);
2920 } else if (Mnemonic == "at") {
2921 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2922 if (!AT)
2923 return TokError("invalid operand for AT instruction");
2924 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2925 std::string Str("AT " + std::string(AT->Name) + " requires ");
2926 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2927 return TokError(Str.c_str());
2928 }
2929 createSysAlias(AT->Encoding, Operands, S);
2930 } else if (Mnemonic == "tlbi") {
2931 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2932 if (!TLBI)
2933 return TokError("invalid operand for TLBI instruction");
2934 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2935 std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2936 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2937 return TokError(Str.c_str());
2938 }
2939 createSysAlias(TLBI->Encoding, Operands, S);
2940 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
2941 const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
2942 if (!PRCTX)
2943 return TokError("invalid operand for prediction restriction instruction");
2944 else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
2945 std::string Str(
2946 Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
2947 setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
2948 return TokError(Str.c_str());
2949 }
2950 uint16_t PRCTX_Op2 =
2951 Mnemonic == "cfp" ? 4 :
2952 Mnemonic == "dvp" ? 5 :
2953 Mnemonic == "cpp" ? 7 :
2954 0;
2955 assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction")((PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction"
) ? static_cast<void> (0) : __assert_fail ("PRCTX_Op2 && \"Invalid mnemonic for prediction restriction instruction\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2955, __PRETTY_FUNCTION__))
;
2956 createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
2957 }
2958
2959 Parser.Lex(); // Eat operand.
2960
2961 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2962 bool HasRegister = false;
2963
2964 // Check for the optional register operand.
2965 if (parseOptionalToken(AsmToken::Comma)) {
2966 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2967 return TokError("expected register operand");
2968 HasRegister = true;
2969 }
2970
2971 if (ExpectRegister && !HasRegister)
2972 return TokError("specified " + Mnemonic + " op requires a register");
2973 else if (!ExpectRegister && HasRegister)
2974 return TokError("specified " + Mnemonic + " op does not use a register");
2975
2976 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2977 return true;
2978
2979 return false;
2980}
2981
/// tryParseBarrierOperand - Parse the option operand of a barrier instruction
/// (DSB/DMB/ISB/TSB): either a "#<imm>" in [0, 15] or a named barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // TSB only ever takes the named option 'csync', never an immediate, so
  // anything that is not an identifier is rejected up front.
  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // The barrier option is a 4-bit field.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical option name when the encoding has one, so the
    // operand prints symbolically.
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // The checks above guarantee at least one of DB/TSB is non-null here, so
  // the TSB->Encoding arm never dereferences null.
  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option
  
  return MatchOperand_Success;
}
3038
3039OperandMatchResultTy
3040AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3041 MCAsmParser &Parser = getParser();
3042 const AsmToken &Tok = Parser.getTok();
3043
3044 if (Tok.isNot(AsmToken::Identifier))
3045 return MatchOperand_NoMatch;
3046
3047 int MRSReg, MSRReg;
3048 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3049 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3050 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3051 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3052 } else
3053 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3054
3055 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3056 unsigned PStateImm = -1;
3057 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3058 PStateImm = PState->Encoding;
3059
3060 Operands.push_back(
3061 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3062 PStateImm, getContext()));
3063 Parser.Lex(); // Eat identifier
3064
3065 return MatchOperand_Success;
3066}
3067
3068/// tryParseNeonVectorRegister - Parse a vector register operand.
3069bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3070 MCAsmParser &Parser = getParser();
3071 if (Parser.getTok().isNot(AsmToken::Identifier))
3072 return true;
3073
3074 SMLoc S = getLoc();
3075 // Check for a vector register specifier first.
3076 StringRef Kind;
3077 unsigned Reg;
3078 OperandMatchResultTy Res =
3079 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3080 if (Res != MatchOperand_Success)
3081 return true;
3082
3083 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3084 if (!KindRes)
3085 return true;
3086
3087 unsigned ElementWidth = KindRes->second;
3088 Operands.push_back(
3089 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3090 S, getLoc(), getContext()));
3091
3092 // If there was an explicit qualifier, that goes on as a literal text
3093 // operand.
3094 if (!Kind.empty())
3095 Operands.push_back(
3096 AArch64Operand::CreateToken(Kind, false, S, getContext()));
3097
3098 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3099}
3100
3101OperandMatchResultTy
3102AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3103 SMLoc SIdx = getLoc();
3104 if (parseOptionalToken(AsmToken::LBrac)) {
3105 const MCExpr *ImmVal;
3106 if (getParser().parseExpression(ImmVal))
3107 return MatchOperand_NoMatch;
3108 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3109 if (!MCE) {
3110 TokError("immediate value expected for vector index");
3111 return MatchOperand_ParseFail;;
3112 }
3113
3114 SMLoc E = getLoc();
3115
3116 if (parseToken(AsmToken::RBrac, "']' expected"))
3117 return MatchOperand_ParseFail;;
3118
3119 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3120 E, getContext()));
3121 return MatchOperand_Success;
3122 }
3123
3124 return MatchOperand_NoMatch;
3125}
3126
3127// tryParseVectorRegister - Try to parse a vector register name with
3128// optional kind specifier. If it is a register specifier, eat the token
3129// and return it.
3130OperandMatchResultTy
3131AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3132 RegKind MatchKind) {
3133 MCAsmParser &Parser = getParser();
3134 const AsmToken &Tok = Parser.getTok();
3135
3136 if (Tok.isNot(AsmToken::Identifier))
3137 return MatchOperand_NoMatch;
3138
3139 StringRef Name = Tok.getString();
3140 // If there is a kind specifier, it's separated from the register name by
3141 // a '.'.
3142 size_t Start = 0, Next = Name.find('.');
3143 StringRef Head = Name.slice(Start, Next);
3144 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3145
3146 if (RegNum) {
3147 if (Next != StringRef::npos) {
3148 Kind = Name.slice(Next, StringRef::npos);
3149 if (!isValidVectorKind(Kind, MatchKind)) {
3150 TokError("invalid vector kind qualifier");
3151 return MatchOperand_ParseFail;
3152 }
3153 }
3154 Parser.Lex(); // Eat the register token.
3155
3156 Reg = RegNum;
3157 return MatchOperand_Success;
3158 }
3159
3160 return MatchOperand_NoMatch;
3161}
3162
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by a '/z' (zeroing) or '/m' (merging) qualifier.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  // An unrecognised size suffix yields NoMatch (not a hard error) so other
  // operand interpretations can still be tried by the caller.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3215
3216/// parseRegister - Parse a register operand.
3217bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3218 // Try for a Neon vector register.
3219 if (!tryParseNeonVectorRegister(Operands))
3220 return false;
3221
3222 // Otherwise try for a scalar register.
3223 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3224 return false;
3225
3226 return true;
3227}
3228
3229bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3230 MCAsmParser &Parser = getParser();
3231 bool HasELFModifier = false;
3232 AArch64MCExpr::VariantKind RefKind;
3233
3234 if (parseOptionalToken(AsmToken::Colon)) {
5
Assuming the condition is true
6
Taking true branch
3235 HasELFModifier = true;
3236
3237 if (Parser.getTok().isNot(AsmToken::Identifier))
7
Taking true branch
3238 return TokError("expect relocation specifier in operand after ':'");
8
Returning without writing to 'ImmVal'
3239
3240 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3241 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3242 .Case("lo12", AArch64MCExpr::VK_LO12)
3243 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3244 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3245 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3246 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3247 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3248 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3249 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3250 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3251 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3252 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3253 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3254 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3255 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3256 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3257 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3258 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3259 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3260 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3261 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3262 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3263 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3264 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3265 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3266 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3267 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3268 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3269 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3270 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3271 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3272 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3273 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3274 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3275 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3276 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3277 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3278 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3279 .Default(AArch64MCExpr::VK_INVALID);
3280
3281 if (RefKind == AArch64MCExpr::VK_INVALID)
3282 return TokError("expect relocation specifier in operand after ':'");
3283
3284 Parser.Lex(); // Eat identifier
3285
3286 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3287 return true;
3288 }
3289
3290 if (getParser().parseExpression(ImmVal))
3291 return true;
3292
3293 if (HasELFModifier)
3294 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3295
3296 return false;
3297}
3298
/// tryParseVectorList - Parse a {}-bracketed list of 1-4 vector registers of
/// kind VectorKind, in either comma-separated ("{ v0.4s, v1.4s }") or range
/// ("{ z0.s - z3.s }") form. When \p ExpectMatch is set, a '{' that does not
/// introduce such a list is a hard error rather than a NoMatch.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      // tryParseVectorRegister already validated the kind, so it must parse.
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{ first - last }".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Register numbers wrap around at 32 when measuring the range length.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{ a, b, c, d }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  // A kind suffix, when present, determines the element geometry of the list.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3419
3420/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3421bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3422 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3423 if (ParseRes != MatchOperand_Success)
3424 return true;
3425
3426 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3427}
3428
3429OperandMatchResultTy
3430AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3431 SMLoc StartLoc = getLoc();
3432
3433 unsigned RegNum;
3434 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3435 if (Res != MatchOperand_Success)
3436 return Res;
3437
3438 if (!parseOptionalToken(AsmToken::Comma)) {
3439 Operands.push_back(AArch64Operand::CreateReg(
3440 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3441 return MatchOperand_Success;
3442 }
3443
3444 parseOptionalToken(AsmToken::Hash);
3445
3446 if (getParser().getTok().isNot(AsmToken::Integer)) {
3447 Error(getLoc(), "index must be absent or #0");
3448 return MatchOperand_ParseFail;
3449 }
3450
3451 const MCExpr *ImmVal;
3452 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3453 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3454 Error(getLoc(), "index must be absent or #0");
3455 return MatchOperand_ParseFail;
3456 }
3457
3458 Operands.push_back(AArch64Operand::CreateReg(
3459 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3460 return MatchOperand_Success;
3461}
3462
/// tryParseGPROperand - Parse a scalar GPR operand. When \p ParseShiftExtend
/// is set, an optional ", <shift|extend> [#amount]" modifier may follow and is
/// folded into the register operand. \p EqTy records how this register must
/// compare against a tied register during matching (exact, sub- or
/// super-register).
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into the register operand itself rather
  // than pushing it as a separate operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3497
3498bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3499 MCAsmParser &Parser = getParser();
3500
3501 // Some SVE instructions have a decoration after the immediate, i.e.
3502 // "mul vl". We parse them here and add tokens, which must be present in the
3503 // asm string in the tablegen instruction.
3504 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3505 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3506 if (!Parser.getTok().getString().equals_lower("mul") ||
3507 !(NextIsVL || NextIsHash))
3508 return true;
3509
3510 Operands.push_back(
3511 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3512 Parser.Lex(); // Eat the "mul"
3513
3514 if (NextIsVL) {
3515 Operands.push_back(
3516 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3517 Parser.Lex(); // Eat the "vl"
3518 return false;
3519 }
3520
3521 if (NextIsHash) {
3522 Parser.Lex(); // Eat the #
3523 SMLoc S = getLoc();
3524
3525 // Parse immediate operand.
3526 const MCExpr *ImmVal;
3527 if (!Parser.parseExpression(ImmVal))
3528 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3529 Operands.push_back(AArch64Operand::CreateImm(
3530 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3531 getContext()));
3532 return MatchOperand_Success;
3533 }
3534 }
3535
3536 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3537}
3538
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else: treat as a (possibly :specifier:-prefixed) expression
    // immediate.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit the "#0.0" as two raw tokens so tablegen matching sees exactly
      // what the instruction strings expect.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // NOTE(review): 'S' is never assigned in this case and is still the
    // default-constructed SMLoc when passed to CreateImm below — confirm
    // whether it should be set (e.g. to Loc).
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Normalize: shift out trailing 16-bit-aligned zero chunks so a single
      // MOVZ (optionally with LSL) can encode the value.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3710
3711bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3712 const MCParsedAsmOperand &Op2) const {
3713 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3714 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3715 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3716 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3717 return MCTargetAsmParser::regsEqual(Op1, Op2);
3718
3719 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3720, __PRETTY_FUNCTION__))
3720 "Testing equality of non-scalar registers not supported")((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3720, __PRETTY_FUNCTION__))
;
3721
3722 // Check if a registers match their sub/super register classes.
3723 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3724 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3725 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3726 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3727 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3728 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3729 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3730 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3731
3732 return false;
3733}
3734
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Populates \p Operands with one token operand for each '.'-separated piece
/// of the mnemonic, followed by the parsed machine operands. Returns true on
/// error (after emitting a diagnostic), false on success.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Canonicalize the undotted conditional-branch aliases ("beq", ...) to the
  // dotted form ("b.eq") so the condition-code suffix handling below sees a
  // single uniform spelling.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point the suffix location at the condition-code text inside the
    // original mnemonic so diagnostics land on the right column.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1; // 1-based operand position, used for the cond-code cases.
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3870
3871static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3872 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31
)) ? static_cast<void> (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3872, __PRETTY_FUNCTION__))
;
3873 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3874 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3875 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3876 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3877 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3878 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3879}
3880
3881// FIXME: This entire function is a giant hack to provide us with decent
3882// operand range validation/diagnostics until TableGen/MC can be extended
3883// to support autogeneration of this kind of validation.
3884bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3885 SmallVectorImpl<SMLoc> &Loc) {
3886 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3887 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3888
3889 // A prefix only applies to the instruction following it. Here we extract
3890 // prefix information for the next instruction before validating the current
3891 // one so that in the case of failure we don't erronously continue using the
3892 // current prefix.
3893 PrefixInfo Prefix = NextPrefix;
3894 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3895
3896 // Before validating the instruction in isolation we run through the rules
3897 // applicable when it follows a prefix instruction.
3898 // NOTE: brk & hlt can be prefixed but require no additional validation.
3899 if (Prefix.isActive() &&
3900 (Inst.getOpcode() != AArch64::BRK) &&
3901 (Inst.getOpcode() != AArch64::HLT)) {
3902
3903 // Prefixed intructions must have a destructive operand.
3904 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3905 AArch64::NotDestructive)
3906 return Error(IDLoc, "instruction is unpredictable when following a"
3907 " movprfx, suggest replacing movprfx with mov");
3908
3909 // Destination operands must match.
3910 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3911 return Error(Loc[0], "instruction is unpredictable when following a"
3912 " movprfx writing to a different destination");
3913
3914 // Destination operand must not be used in any other location.
3915 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3916 if (Inst.getOperand(i).isReg() &&
3917 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3918 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3919 return Error(Loc[0], "instruction is unpredictable when following a"
3920 " movprfx and destination also used as non-destructive"
3921 " source");
3922 }
3923
3924 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3925 if (Prefix.isPredicated()) {
3926 int PgIdx = -1;
3927
3928 // Find the instructions general predicate.
3929 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3930 if (Inst.getOperand(i).isReg() &&
3931 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3932 PgIdx = i;
3933 break;
3934 }
3935
3936 // Instruction must be predicated if the movprfx is predicated.
3937 if (PgIdx == -1 ||
3938 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3939 return Error(IDLoc, "instruction is unpredictable when following a"
3940 " predicated movprfx, suggest using unpredicated movprfx");
3941
3942 // Instruction must use same general predicate as the movprfx.
3943 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3944 return Error(IDLoc, "instruction is unpredictable when following a"
3945 " predicated movprfx using a different general predicate");
3946
3947 // Instruction element type must match the movprfx.
3948 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3949 return Error(IDLoc, "instruction is unpredictable when following a"
3950 " predicated movprfx with a different element size");
3951 }
3952 }
3953
3954 // Check for indexed addressing modes w/ the base register being the
3955 // same as a destination/source register or pair load where
3956 // the Rt == Rt2. All of those are undefined behaviour.
3957 switch (Inst.getOpcode()) {
3958 case AArch64::LDPSWpre:
3959 case AArch64::LDPWpost:
3960 case AArch64::LDPWpre:
3961 case AArch64::LDPXpost:
3962 case AArch64::LDPXpre: {
3963 unsigned Rt = Inst.getOperand(1).getReg();
3964 unsigned Rt2 = Inst.getOperand(2).getReg();
3965 unsigned Rn = Inst.getOperand(3).getReg();
3966 if (RI->isSubRegisterEq(Rn, Rt))
3967 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3968 "is also a destination");
3969 if (RI->isSubRegisterEq(Rn, Rt2))
3970 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3971 "is also a destination");
3972 LLVM_FALLTHROUGH[[clang::fallthrough]];
3973 }
3974 case AArch64::LDPDi:
3975 case AArch64::LDPQi:
3976 case AArch64::LDPSi:
3977 case AArch64::LDPSWi:
3978 case AArch64::LDPWi:
3979 case AArch64::LDPXi: {
3980 unsigned Rt = Inst.getOperand(0).getReg();
3981 unsigned Rt2 = Inst.getOperand(1).getReg();
3982 if (Rt == Rt2)
3983 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3984 break;
3985 }
3986 case AArch64::LDPDpost:
3987 case AArch64::LDPDpre:
3988 case AArch64::LDPQpost:
3989 case AArch64::LDPQpre:
3990 case AArch64::LDPSpost:
3991 case AArch64::LDPSpre:
3992 case AArch64::LDPSWpost: {
3993 unsigned Rt = Inst.getOperand(1).getReg();
3994 unsigned Rt2 = Inst.getOperand(2).getReg();
3995 if (Rt == Rt2)
3996 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3997 break;
3998 }
3999 case AArch64::STPDpost:
4000 case AArch64::STPDpre:
4001 case AArch64::STPQpost:
4002 case AArch64::STPQpre:
4003 case AArch64::STPSpost:
4004 case AArch64::STPSpre:
4005 case AArch64::STPWpost:
4006 case AArch64::STPWpre:
4007 case AArch64::STPXpost:
4008 case AArch64::STPXpre: {
4009 unsigned Rt = Inst.getOperand(1).getReg();
4010 unsigned Rt2 = Inst.getOperand(2).getReg();
4011 unsigned Rn = Inst.getOperand(3).getReg();
4012 if (RI->isSubRegisterEq(Rn, Rt))
4013 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4014 "is also a source");
4015 if (RI->isSubRegisterEq(Rn, Rt2))
4016 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4017 "is also a source");
4018 break;
4019 }
4020 case AArch64::LDRBBpre:
4021 case AArch64::LDRBpre:
4022 case AArch64::LDRHHpre:
4023 case AArch64::LDRHpre:
4024 case AArch64::LDRSBWpre:
4025 case AArch64::LDRSBXpre:
4026 case AArch64::LDRSHWpre:
4027 case AArch64::LDRSHXpre:
4028 case AArch64::LDRSWpre:
4029 case AArch64::LDRWpre:
4030 case AArch64::LDRXpre:
4031 case AArch64::LDRBBpost:
4032 case AArch64::LDRBpost:
4033 case AArch64::LDRHHpost:
4034 case AArch64::LDRHpost:
4035 case AArch64::LDRSBWpost:
4036 case AArch64::LDRSBXpost:
4037 case AArch64::LDRSHWpost:
4038 case AArch64::LDRSHXpost:
4039 case AArch64::LDRSWpost:
4040 case AArch64::LDRWpost:
4041 case AArch64::LDRXpost: {
4042 unsigned Rt = Inst.getOperand(1).getReg();
4043 unsigned Rn = Inst.getOperand(2).getReg();
4044 if (RI->isSubRegisterEq(Rn, Rt))
4045 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4046 "is also a source");
4047 break;
4048 }
4049 case AArch64::STRBBpost:
4050 case AArch64::STRBpost:
4051 case AArch64::STRHHpost:
4052 case AArch64::STRHpost:
4053 case AArch64::STRWpost:
4054 case AArch64::STRXpost:
4055 case AArch64::STRBBpre:
4056 case AArch64::STRBpre:
4057 case AArch64::STRHHpre:
4058 case AArch64::STRHpre:
4059 case AArch64::STRWpre:
4060 case AArch64::STRXpre: {
4061 unsigned Rt = Inst.getOperand(1).getReg();
4062 unsigned Rn = Inst.getOperand(2).getReg();
4063 if (RI->isSubRegisterEq(Rn, Rt))
4064 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4065 "is also a source");
4066 break;
4067 }
4068 case AArch64::STXRB:
4069 case AArch64::STXRH:
4070 case AArch64::STXRW:
4071 case AArch64::STXRX:
4072 case AArch64::STLXRB:
4073 case AArch64::STLXRH:
4074 case AArch64::STLXRW:
4075 case AArch64::STLXRX: {
4076 unsigned Rs = Inst.getOperand(0).getReg();
4077 unsigned Rt = Inst.getOperand(1).getReg();
4078 unsigned Rn = Inst.getOperand(2).getReg();
4079 if (RI->isSubRegisterEq(Rt, Rs) ||
4080 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4081 return Error(Loc[0],
4082 "unpredictable STXR instruction, status is also a source");
4083 break;
4084 }
4085 case AArch64::STXPW:
4086 case AArch64::STXPX:
4087 case AArch64::STLXPW:
4088 case AArch64::STLXPX: {
4089 unsigned Rs = Inst.getOperand(0).getReg();
4090 unsigned Rt1 = Inst.getOperand(1).getReg();
4091 unsigned Rt2 = Inst.getOperand(2).getReg();
4092 unsigned Rn = Inst.getOperand(3).getReg();
4093 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4094 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4095 return Error(Loc[0],
4096 "unpredictable STXP instruction, status is also a source");
4097 break;
4098 }
4099 case AArch64::LDGV: {
4100 unsigned Rt = Inst.getOperand(0).getReg();
4101 unsigned Rn = Inst.getOperand(1).getReg();
4102 if (RI->isSubRegisterEq(Rt, Rn)) {
4103 return Error(Loc[0],
4104 "unpredictable LDGV instruction, writeback register is also "
4105 "the target register");
4106 }
4107 }
4108 }
4109
4110
4111 // Now check immediate ranges. Separate from the above as there is overlap
4112 // in the instructions being checked and this keeps the nested conditionals
4113 // to a minimum.
4114 switch (Inst.getOpcode()) {
4115 case AArch64::ADDSWri:
4116 case AArch64::ADDSXri:
4117 case AArch64::ADDWri:
4118 case AArch64::ADDXri:
4119 case AArch64::SUBSWri:
4120 case AArch64::SUBSXri:
4121 case AArch64::SUBWri:
4122 case AArch64::SUBXri: {
4123 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4124 // some slight duplication here.
4125 if (Inst.getOperand(2).isExpr()) {
4126 const MCExpr *Expr = Inst.getOperand(2).getExpr();
4127 AArch64MCExpr::VariantKind ELFRefKind;
4128 MCSymbolRefExpr::VariantKind DarwinRefKind;
4129 int64_t Addend;
4130 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4131
4132 // Only allow these with ADDXri.
4133 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4134 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4135 Inst.getOpcode() == AArch64::ADDXri)
4136 return false;
4137
4138 // Only allow these with ADDXri/ADDWri
4139 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4140 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4141 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4142 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4143 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4144 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4145 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4146 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4147 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4148 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4149 (Inst.getOpcode() == AArch64::ADDXri ||
4150 Inst.getOpcode() == AArch64::ADDWri))
4151 return false;
4152
4153 // Don't allow symbol refs in the immediate field otherwise
4154 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4155 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4156 // 'cmp w0, 'borked')
4157 return Error(Loc.back(), "invalid immediate expression");
4158 }
4159 // We don't validate more complex expressions here
4160 }
4161 return false;
4162 }
4163 default:
4164 return false;
4165 }
4166}
4167
4168static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
4169 unsigned VariantID = 0);
4170
4171bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4172 uint64_t ErrorInfo,
4173 OperandVector &Operands) {
4174 switch (ErrCode) {
4175 case Match_InvalidTiedOperand: {
4176 RegConstraintEqualityTy EqTy =
4177 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4178 .getRegEqualityTy();
4179 switch (EqTy) {
4180 case RegConstraintEqualityTy::EqualsSubReg:
4181 return Error(Loc, "operand must be 64-bit form of destination register");
4182 case RegConstraintEqualityTy::EqualsSuperReg:
4183 return Error(Loc, "operand must be 32-bit form of destination register");
4184 case RegConstraintEqualityTy::EqualsReg:
4185 return Error(Loc, "operand must match destination register");
4186 }
4187 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4187)
;
4188 }
4189 case Match_MissingFeature:
4190 return Error(Loc,
4191 "instruction requires a CPU feature not currently enabled");
4192 case Match_InvalidOperand:
4193 return Error(Loc, "invalid operand for instruction");
4194 case Match_InvalidSuffix:
4195 return Error(Loc, "invalid type suffix for instruction");
4196 case Match_InvalidCondCode:
4197 return Error(Loc, "expected AArch64 condition code");
4198 case Match_AddSubRegExtendSmall:
4199 return Error(Loc,
4200 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
4201 case Match_AddSubRegExtendLarge:
4202 return Error(Loc,
4203 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4204 case Match_AddSubSecondSource:
4205 return Error(Loc,
4206 "expected compatible register, symbol or integer in range [0, 4095]");
4207 case Match_LogicalSecondSource:
4208 return Error(Loc, "expected compatible register or logical immediate");
4209 case Match_InvalidMovImm32Shift:
4210 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4211 case Match_InvalidMovImm64Shift:
4212 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4213 case Match_AddSubRegShift32:
4214 return Error(Loc,
4215 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4216 case Match_AddSubRegShift64:
4217 return Error(Loc,
4218 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4219 case Match_InvalidFPImm:
4220 return Error(Loc,
4221 "expected compatible register or floating-point constant");
4222 case Match_InvalidMemoryIndexedSImm6:
4223 return Error(Loc, "index must be an integer in range [-32, 31].");
4224 case Match_InvalidMemoryIndexedSImm5:
4225 return Error(Loc, "index must be an integer in range [-16, 15].");
4226 case Match_InvalidMemoryIndexed1SImm4:
4227 return Error(Loc, "index must be an integer in range [-8, 7].");
4228 case Match_InvalidMemoryIndexed2SImm4:
4229 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4230 case Match_InvalidMemoryIndexed3SImm4:
4231 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4232 case Match_InvalidMemoryIndexed4SImm4:
4233 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4234 case Match_InvalidMemoryIndexed16SImm4:
4235 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4236 case Match_InvalidMemoryIndexed1SImm6:
4237 return Error(Loc, "index must be an integer in range [-32, 31].");
4238 case Match_InvalidMemoryIndexedSImm8:
4239 return Error(Loc, "index must be an integer in range [-128, 127].");
4240 case Match_InvalidMemoryIndexedSImm9:
4241 return Error(Loc, "index must be an integer in range [-256, 255].");
4242 case Match_InvalidMemoryIndexed16SImm9:
4243 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4244 case Match_InvalidMemoryIndexed8SImm10:
4245 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4246 case Match_InvalidMemoryIndexed4SImm7:
4247 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4248 case Match_InvalidMemoryIndexed8SImm7:
4249 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4250 case Match_InvalidMemoryIndexed16SImm7:
4251 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4252 case Match_InvalidMemoryIndexed8UImm5:
4253 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4254 case Match_InvalidMemoryIndexed4UImm5:
4255 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4256 case Match_InvalidMemoryIndexed2UImm5:
4257 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4258 case Match_InvalidMemoryIndexed8UImm6:
4259 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4260 case Match_InvalidMemoryIndexed16UImm6:
4261 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4262 case Match_InvalidMemoryIndexed4UImm6:
4263 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4264 case Match_InvalidMemoryIndexed2UImm6:
4265 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4266 case Match_InvalidMemoryIndexed1UImm6:
4267 return Error(Loc, "index must be in range [0, 63].");
4268 case Match_InvalidMemoryWExtend8:
4269 return Error(Loc,
4270 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4271 case Match_InvalidMemoryWExtend16:
4272 return Error(Loc,
4273 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4274 case Match_InvalidMemoryWExtend32:
4275 return Error(Loc,
4276 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4277 case Match_InvalidMemoryWExtend64:
4278 return Error(Loc,
4279 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4280 case Match_InvalidMemoryWExtend128:
4281 return Error(Loc,
4282 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4283 case Match_InvalidMemoryXExtend8:
4284 return Error(Loc,
4285 "expected 'lsl' or 'sxtx' with optional shift of #0");
4286 case Match_InvalidMemoryXExtend16:
4287 return Error(Loc,
4288 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4289 case Match_InvalidMemoryXExtend32:
4290 return Error(Loc,
4291 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4292 case Match_InvalidMemoryXExtend64:
4293 return Error(Loc,
4294 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4295 case Match_InvalidMemoryXExtend128:
4296 return Error(Loc,
4297 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4298 case Match_InvalidMemoryIndexed1:
4299 return Error(Loc, "index must be an integer in range [0, 4095].");
4300 case Match_InvalidMemoryIndexed2:
4301 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4302 case Match_InvalidMemoryIndexed4:
4303 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4304 case Match_InvalidMemoryIndexed8:
4305 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4306 case Match_InvalidMemoryIndexed16:
4307 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4308 case Match_InvalidImm0_1:
4309 return Error(Loc, "immediate must be an integer in range [0, 1].");
4310 case Match_InvalidImm0_7:
4311 return Error(Loc, "immediate must be an integer in range [0, 7].");
4312 case Match_InvalidImm0_15:
4313 return Error(Loc, "immediate must be an integer in range [0, 15].");
4314 case Match_InvalidImm0_31:
4315 return Error(Loc, "immediate must be an integer in range [0, 31].");
4316 case Match_InvalidImm0_63:
4317 return Error(Loc, "immediate must be an integer in range [0, 63].");
4318 case Match_InvalidImm0_127:
4319 return Error(Loc, "immediate must be an integer in range [0, 127].");
4320 case Match_InvalidImm0_255:
4321 return Error(Loc, "immediate must be an integer in range [0, 255].");
4322 case Match_InvalidImm0_65535:
4323 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4324 case Match_InvalidImm1_8:
4325 return Error(Loc, "immediate must be an integer in range [1, 8].");
4326 case Match_InvalidImm1_16:
4327 return Error(Loc, "immediate must be an integer in range [1, 16].");
4328 case Match_InvalidImm1_32:
4329 return Error(Loc, "immediate must be an integer in range [1, 32].");
4330 case Match_InvalidImm1_64:
4331 return Error(Loc, "immediate must be an integer in range [1, 64].");
4332 case Match_InvalidSVEAddSubImm8:
4333 return Error(Loc, "immediate must be an integer in range [0, 255]"
4334 " with a shift amount of 0");
4335 case Match_InvalidSVEAddSubImm16:
4336 case Match_InvalidSVEAddSubImm32:
4337 case Match_InvalidSVEAddSubImm64:
4338 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4339 "multiple of 256 in range [256, 65280]");
4340 case Match_InvalidSVECpyImm8:
4341 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4342 " with a shift amount of 0");
4343 case Match_InvalidSVECpyImm16:
4344 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4345 "multiple of 256 in range [-32768, 65280]");
4346 case Match_InvalidSVECpyImm32:
4347 case Match_InvalidSVECpyImm64:
4348 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4349 "multiple of 256 in range [-32768, 32512]");
4350 case Match_InvalidIndexRange1_1:
4351 return Error(Loc, "expected lane specifier '[1]'");
4352 case Match_InvalidIndexRange0_15:
4353 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4354 case Match_InvalidIndexRange0_7:
4355 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4356 case Match_InvalidIndexRange0_3:
4357 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4358 case Match_InvalidIndexRange0_1:
4359 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4360 case Match_InvalidSVEIndexRange0_63:
4361 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4362 case Match_InvalidSVEIndexRange0_31:
4363 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4364 case Match_InvalidSVEIndexRange0_15:
4365 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4366 case Match_InvalidSVEIndexRange0_7:
4367 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4368 case Match_InvalidSVEIndexRange0_3:
4369 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4370 case Match_InvalidLabel:
4371 return Error(Loc, "expected label or encodable integer pc offset");
4372 case Match_MRS:
4373 return Error(Loc, "expected readable system register");
4374 case Match_MSR:
4375 return Error(Loc, "expected writable system register or pstate");
4376 case Match_InvalidComplexRotationEven:
4377 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4378 case Match_InvalidComplexRotationOdd:
4379 return Error(Loc, "complex rotation must be 90 or 270.");
4380 case Match_MnemonicFail: {
4381 std::string Suggestion = AArch64MnemonicSpellCheck(
4382 ((AArch64Operand &)*Operands[0]).getToken(),
4383 ComputeAvailableFeatures(STI->getFeatureBits()));
4384 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4385 }
4386 case Match_InvalidGPR64shifted8:
4387 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4388 case Match_InvalidGPR64shifted16:
4389 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4390 case Match_InvalidGPR64shifted32:
4391 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4392 case Match_InvalidGPR64shifted64:
4393 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4394 case Match_InvalidGPR64NoXZRshifted8:
4395 return Error(Loc, "register must be x0..x30 without shift");
4396 case Match_InvalidGPR64NoXZRshifted16:
4397 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4398 case Match_InvalidGPR64NoXZRshifted32:
4399 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4400 case Match_InvalidGPR64NoXZRshifted64:
4401 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4402 case Match_InvalidZPR32UXTW8:
4403 case Match_InvalidZPR32SXTW8:
4404 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4405 case Match_InvalidZPR32UXTW16:
4406 case Match_InvalidZPR32SXTW16:
4407 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4408 case Match_InvalidZPR32UXTW32:
4409 case Match_InvalidZPR32SXTW32:
4410 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4411 case Match_InvalidZPR32UXTW64:
4412 case Match_InvalidZPR32SXTW64:
4413 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4414 case Match_InvalidZPR64UXTW8:
4415 case Match_InvalidZPR64SXTW8:
4416 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4417 case Match_InvalidZPR64UXTW16:
4418 case Match_InvalidZPR64SXTW16:
4419 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4420 case Match_InvalidZPR64UXTW32:
4421 case Match_InvalidZPR64SXTW32:
4422 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4423 case Match_InvalidZPR64UXTW64:
4424 case Match_InvalidZPR64SXTW64:
4425 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4426 case Match_InvalidZPR32LSL8:
4427 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4428 case Match_InvalidZPR32LSL16:
4429 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4430 case Match_InvalidZPR32LSL32:
4431 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4432 case Match_InvalidZPR32LSL64:
4433 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4434 case Match_InvalidZPR64LSL8:
4435 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4436 case Match_InvalidZPR64LSL16:
4437 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4438 case Match_InvalidZPR64LSL32:
4439 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4440 case Match_InvalidZPR64LSL64:
4441 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4442 case Match_InvalidZPR0:
4443 return Error(Loc, "expected register without element width sufix");
4444 case Match_InvalidZPR8:
4445 case Match_InvalidZPR16:
4446 case Match_InvalidZPR32:
4447 case Match_InvalidZPR64:
4448 case Match_InvalidZPR128:
4449 return Error(Loc, "invalid element width");
4450 case Match_InvalidZPR_3b8:
4451 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4452 case Match_InvalidZPR_3b16:
4453 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4454 case Match_InvalidZPR_3b32:
4455 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4456 case Match_InvalidZPR_4b16:
4457 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4458 case Match_InvalidZPR_4b32:
4459 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4460 case Match_InvalidZPR_4b64:
4461 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4462 case Match_InvalidSVEPattern:
4463 return Error(Loc, "invalid predicate pattern");
4464 case Match_InvalidSVEPredicateAnyReg:
4465 case Match_InvalidSVEPredicateBReg:
4466 case Match_InvalidSVEPredicateHReg:
4467 case Match_InvalidSVEPredicateSReg:
4468 case Match_InvalidSVEPredicateDReg:
4469 return Error(Loc, "invalid predicate register.");
4470 case Match_InvalidSVEPredicate3bAnyReg:
4471 case Match_InvalidSVEPredicate3bBReg:
4472 case Match_InvalidSVEPredicate3bHReg:
4473 case Match_InvalidSVEPredicate3bSReg:
4474 case Match_InvalidSVEPredicate3bDReg:
4475 return Error(Loc, "restricted predicate has range [0, 7].");
4476 case Match_InvalidSVEExactFPImmOperandHalfOne:
4477 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4478 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4479 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4480 case Match_InvalidSVEExactFPImmOperandZeroOne:
4481 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4482 default:
4483 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4483)
;
4484 }
4485}
4486
4487static const char *getSubtargetFeatureName(uint64_t Val);
4488
4489bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4490 OperandVector &Operands,
4491 MCStreamer &Out,
4492 uint64_t &ErrorInfo,
4493 bool MatchingInlineAsm) {
4494 assert(!Operands.empty() && "Unexpect empty operand list!")((!Operands.empty() && "Unexpect empty operand list!"
) ? static_cast<void> (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4494, __PRETTY_FUNCTION__))
;
4495 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4496 assert(Op.isToken() && "Leading operand should always be a mnemonic!")((Op.isToken() && "Leading operand should always be a mnemonic!"
) ? static_cast<void> (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4496, __PRETTY_FUNCTION__))
;
4497
4498 StringRef Tok = Op.getToken();
4499 unsigned NumOperands = Operands.size();
4500
4501 if (NumOperands == 4 && Tok == "lsl") {
4502 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4503 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4504 if (Op2.isScalarReg() && Op3.isImm()) {
4505 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4506 if (Op3CE) {
4507 uint64_t Op3Val = Op3CE->getValue();
4508 uint64_t NewOp3Val = 0;
4509 uint64_t NewOp4Val = 0;
4510 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4511 Op2.getReg())) {
4512 NewOp3Val = (32 - Op3Val) & 0x1f;
4513 NewOp4Val = 31 - Op3Val;
4514 } else {
4515 NewOp3Val = (64 - Op3Val) & 0x3f;
4516 NewOp4Val = 63 - Op3Val;
4517 }
4518
4519 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4520 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4521
4522 Operands[0] = AArch64Operand::CreateToken(
4523 "ubfm", false, Op.getStartLoc(), getContext());
4524 Operands.push_back(AArch64Operand::CreateImm(
4525 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4526 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4527 Op3.getEndLoc(), getContext());
4528 }
4529 }
4530 } else if (NumOperands == 4 && Tok == "bfc") {
4531 // FIXME: Horrible hack to handle BFC->BFM alias.
4532 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4533 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4534 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4535
4536 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4537 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4538 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4539
4540 if (LSBCE && WidthCE) {
4541 uint64_t LSB = LSBCE->getValue();
4542 uint64_t Width = WidthCE->getValue();
4543
4544 uint64_t RegWidth = 0;
4545 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4546 Op1.getReg()))
4547 RegWidth = 64;
4548 else
4549 RegWidth = 32;
4550
4551 if (LSB >= RegWidth)
4552 return Error(LSBOp.getStartLoc(),
4553 "expected integer in range [0, 31]");
4554 if (Width < 1 || Width > RegWidth)
4555 return Error(WidthOp.getStartLoc(),
4556 "expected integer in range [1, 32]");
4557
4558 uint64_t ImmR = 0;
4559 if (RegWidth == 32)
4560 ImmR = (32 - LSB) & 0x1f;
4561 else
4562 ImmR = (64 - LSB) & 0x3f;
4563
4564 uint64_t ImmS = Width - 1;
4565
4566 if (ImmR != 0 && ImmS >= ImmR)
4567 return Error(WidthOp.getStartLoc(),
4568 "requested insert overflows register");
4569
4570 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4571 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4572 Operands[0] = AArch64Operand::CreateToken(
4573 "bfm", false, Op.getStartLoc(), getContext());
4574 Operands[2] = AArch64Operand::CreateReg(
4575 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4576 SMLoc(), SMLoc(), getContext());
4577 Operands[3] = AArch64Operand::CreateImm(
4578 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4579 Operands.emplace_back(
4580 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4581 WidthOp.getEndLoc(), getContext()));
4582 }
4583 }
4584 } else if (NumOperands == 5) {
4585 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4586 // UBFIZ -> UBFM aliases.
4587 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4588 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4589 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4590 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4591
4592 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4593 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4594 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4595
4596 if (Op3CE && Op4CE) {
4597 uint64_t Op3Val = Op3CE->getValue();
4598 uint64_t Op4Val = Op4CE->getValue();
4599
4600 uint64_t RegWidth = 0;
4601 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4602 Op1.getReg()))
4603 RegWidth = 64;
4604 else
4605 RegWidth = 32;
4606
4607 if (Op3Val >= RegWidth)
4608 return Error(Op3.getStartLoc(),
4609 "expected integer in range [0, 31]");
4610 if (Op4Val < 1 || Op4Val > RegWidth)
4611 return Error(Op4.getStartLoc(),
4612 "expected integer in range [1, 32]");
4613
4614 uint64_t NewOp3Val = 0;
4615 if (RegWidth == 32)
4616 NewOp3Val = (32 - Op3Val) & 0x1f;
4617 else
4618 NewOp3Val = (64 - Op3Val) & 0x3f;
4619
4620 uint64_t NewOp4Val = Op4Val - 1;
4621
4622 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4623 return Error(Op4.getStartLoc(),
4624 "requested insert overflows register");
4625
4626 const MCExpr *NewOp3 =
4627 MCConstantExpr::create(NewOp3Val, getContext());
4628 const MCExpr *NewOp4 =
4629 MCConstantExpr::create(NewOp4Val, getContext());
4630 Operands[3] = AArch64Operand::CreateImm(
4631 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4632 Operands[4] = AArch64Operand::CreateImm(
4633 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4634 if (Tok == "bfi")
4635 Operands[0] = AArch64Operand::CreateToken(
4636 "bfm", false, Op.getStartLoc(), getContext());
4637 else if (Tok == "sbfiz")
4638 Operands[0] = AArch64Operand::CreateToken(
4639 "sbfm", false, Op.getStartLoc(), getContext());
4640 else if (Tok == "ubfiz")
4641 Operands[0] = AArch64Operand::CreateToken(
4642 "ubfm", false, Op.getStartLoc(), getContext());
4643 else
4644 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4644)
;
4645 }
4646 }
4647
4648 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4649 // UBFX -> UBFM aliases.
4650 } else if (NumOperands == 5 &&
4651 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4652 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4653 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4654 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4655
4656 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4657 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4658 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4659
4660 if (Op3CE && Op4CE) {
4661 uint64_t Op3Val = Op3CE->getValue();
4662 uint64_t Op4Val = Op4CE->getValue();
4663
4664 uint64_t RegWidth = 0;
4665 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4666 Op1.getReg()))
4667 RegWidth = 64;
4668 else
4669 RegWidth = 32;
4670
4671 if (Op3Val >= RegWidth)
4672 return Error(Op3.getStartLoc(),
4673 "expected integer in range [0, 31]");
4674 if (Op4Val < 1 || Op4Val > RegWidth)
4675 return Error(Op4.getStartLoc(),
4676 "expected integer in range [1, 32]");
4677
4678 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4679
4680 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4681 return Error(Op4.getStartLoc(),
4682 "requested extract overflows register");
4683
4684 const MCExpr *NewOp4 =
4685 MCConstantExpr::create(NewOp4Val, getContext());
4686 Operands[4] = AArch64Operand::CreateImm(
4687 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4688 if (Tok == "bfxil")
4689 Operands[0] = AArch64Operand::CreateToken(
4690 "bfm", false, Op.getStartLoc(), getContext());
4691 else if (Tok == "sbfx")
4692 Operands[0] = AArch64Operand::CreateToken(
4693 "sbfm", false, Op.getStartLoc(), getContext());
4694 else if (Tok == "ubfx")
4695 Operands[0] = AArch64Operand::CreateToken(
4696 "ubfm", false, Op.getStartLoc(), getContext());
4697 else
4698 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4698)
;
4699 }
4700 }
4701 }
4702 }
4703
4704 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4705 // instruction for FP registers correctly in some rare circumstances. Convert
4706 // it to a safe instruction and warn (because silently changing someone's
4707 // assembly is rude).
4708 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4709 NumOperands == 4 && Tok == "movi") {
4710 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4711 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4712 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4713 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4714 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4715 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4716 if (Suffix.lower() == ".2d" &&
4717 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4718 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4719 " correctly on this CPU, converting to equivalent movi.16b");
4720 // Switch the suffix to .16b.
4721 unsigned Idx = Op1.isToken() ? 1 : 2;
4722 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4723 getContext());
4724 }
4725 }
4726 }
4727
4728 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4729 // InstAlias can't quite handle this since the reg classes aren't
4730 // subclasses.
4731 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4732 // The source register can be Wn here, but the matcher expects a
4733 // GPR64. Twiddle it here if necessary.
4734 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4735 if (Op.isScalarReg()) {
4736 unsigned Reg = getXRegFromWReg(Op.getReg());
4737 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4738 Op.getStartLoc(), Op.getEndLoc(),
4739 getContext());
4740 }
4741 }
4742 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4743 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4744 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4745 if (Op.isScalarReg() &&
4746 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4747 Op.getReg())) {
4748 // The source register can be Wn here, but the matcher expects a
4749 // GPR64. Twiddle it here if necessary.
4750 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4751 if (Op.isScalarReg()) {
4752 unsigned Reg = getXRegFromWReg(Op.getReg());
4753 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4754 Op.getStartLoc(),
4755 Op.getEndLoc(), getContext());
4756 }
4757 }
4758 }
4759 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4760 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4761 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4762 if (Op.isScalarReg() &&
4763 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4764 Op.getReg())) {
4765 // The source register can be Wn here, but the matcher expects a
4766 // GPR32. Twiddle it here if necessary.
4767 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4768 if (Op.isScalarReg()) {
4769 unsigned Reg = getWRegFromXReg(Op.getReg());
4770 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4771 Op.getStartLoc(),
4772 Op.getEndLoc(), getContext());
4773 }
4774 }
4775 }
4776
4777 MCInst Inst;
4778 // First try to match against the secondary set of tables containing the
4779 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4780 unsigned MatchResult =
4781 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4782
4783 // If that fails, try against the alternate table containing long-form NEON:
4784 // "fadd v0.2s, v1.2s, v2.2s"
4785 if (MatchResult != Match_Success) {
4786 // But first, save the short-form match result: we can use it in case the
4787 // long-form match also fails.
4788 auto ShortFormNEONErrorInfo = ErrorInfo;
4789 auto ShortFormNEONMatchResult = MatchResult;
4790
4791 MatchResult =
4792 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4793
4794 // Now, both matches failed, and the long-form match failed on the mnemonic
4795 // suffix token operand. The short-form match failure is probably more
4796 // relevant: use it instead.
4797 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4798 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4799 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4800 MatchResult = ShortFormNEONMatchResult;
4801 ErrorInfo = ShortFormNEONErrorInfo;
4802 }
4803 }
4804
4805 switch (MatchResult) {
4806 case Match_Success: {
4807 // Perform range checking and other semantic validations
4808 SmallVector<SMLoc, 8> OperandLocs;
4809 NumOperands = Operands.size();
4810 for (unsigned i = 1; i < NumOperands; ++i)
4811 OperandLocs.push_back(Operands[i]->getStartLoc());
4812 if (validateInstruction(Inst, IDLoc, OperandLocs))
4813 return true;
4814
4815 Inst.setLoc(IDLoc);
4816 Out.EmitInstruction(Inst, getSTI());
4817 return false;
4818 }
4819 case Match_MissingFeature: {
4820 assert(ErrorInfo && "Unknown missing feature!")((ErrorInfo && "Unknown missing feature!") ? static_cast
<void> (0) : __assert_fail ("ErrorInfo && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4820, __PRETTY_FUNCTION__))
;
4821 // Special case the error message for the very common case where only
4822 // a single subtarget feature is missing (neon, e.g.).
4823 std::string Msg = "instruction requires:";
4824 uint64_t Mask = 1;
4825 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4826 if (ErrorInfo & Mask) {
4827 Msg += " ";
4828 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4829 }
4830 Mask <<= 1;
4831 }
4832 return Error(IDLoc, Msg);
4833 }
4834 case Match_MnemonicFail:
4835 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4836 case Match_InvalidOperand: {
4837 SMLoc ErrorLoc = IDLoc;
4838
4839 if (ErrorInfo != ~0ULL) {
4840 if (ErrorInfo >= Operands.size())
4841 return Error(IDLoc, "too few operands for instruction",
4842 SMRange(IDLoc, getTok().getLoc()));
4843
4844 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4845 if (ErrorLoc == SMLoc())
4846 ErrorLoc = IDLoc;
4847 }
4848 // If the match failed on a suffix token operand, tweak the diagnostic
4849 // accordingly.
4850 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4851 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4852 MatchResult = Match_InvalidSuffix;
4853
4854 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4855 }
4856 case Match_InvalidTiedOperand:
4857 case Match_InvalidMemoryIndexed1:
4858 case Match_InvalidMemoryIndexed2:
4859 case Match_InvalidMemoryIndexed4:
4860 case Match_InvalidMemoryIndexed8:
4861 case Match_InvalidMemoryIndexed16:
4862 case Match_InvalidCondCode:
4863 case Match_AddSubRegExtendSmall:
4864 case Match_AddSubRegExtendLarge:
4865 case Match_AddSubSecondSource:
4866 case Match_LogicalSecondSource:
4867 case Match_AddSubRegShift32:
4868 case Match_AddSubRegShift64:
4869 case Match_InvalidMovImm32Shift:
4870 case Match_InvalidMovImm64Shift:
4871 case Match_InvalidFPImm:
4872 case Match_InvalidMemoryWExtend8:
4873 case Match_InvalidMemoryWExtend16:
4874 case Match_InvalidMemoryWExtend32:
4875 case Match_InvalidMemoryWExtend64:
4876 case Match_InvalidMemoryWExtend128:
4877 case Match_InvalidMemoryXExtend8:
4878 case Match_InvalidMemoryXExtend16:
4879 case Match_InvalidMemoryXExtend32:
4880 case Match_InvalidMemoryXExtend64:
4881 case Match_InvalidMemoryXExtend128:
4882 case Match_InvalidMemoryIndexed1SImm4:
4883 case Match_InvalidMemoryIndexed2SImm4:
4884 case Match_InvalidMemoryIndexed3SImm4:
4885 case Match_InvalidMemoryIndexed4SImm4:
4886 case Match_InvalidMemoryIndexed1SImm6:
4887 case Match_InvalidMemoryIndexed16SImm4:
4888 case Match_InvalidMemoryIndexed4SImm7:
4889 case Match_InvalidMemoryIndexed8SImm7:
4890 case Match_InvalidMemoryIndexed16SImm7:
4891 case Match_InvalidMemoryIndexed8UImm5:
4892 case Match_InvalidMemoryIndexed4UImm5:
4893 case Match_InvalidMemoryIndexed2UImm5:
4894 case Match_InvalidMemoryIndexed1UImm6:
4895 case Match_InvalidMemoryIndexed2UImm6:
4896 case Match_InvalidMemoryIndexed4UImm6:
4897 case Match_InvalidMemoryIndexed8UImm6:
4898 case Match_InvalidMemoryIndexed16UImm6:
4899 case Match_InvalidMemoryIndexedSImm6:
4900 case Match_InvalidMemoryIndexedSImm5:
4901 case Match_InvalidMemoryIndexedSImm8:
4902 case Match_InvalidMemoryIndexedSImm9:
4903 case Match_InvalidMemoryIndexed16SImm9:
4904 case Match_InvalidMemoryIndexed8SImm10:
4905 case Match_InvalidImm0_1:
4906 case Match_InvalidImm0_7:
4907 case Match_InvalidImm0_15:
4908 case Match_InvalidImm0_31:
4909 case Match_InvalidImm0_63:
4910 case Match_InvalidImm0_127:
4911 case Match_InvalidImm0_255:
4912 case Match_InvalidImm0_65535:
4913 case Match_InvalidImm1_8:
4914 case Match_InvalidImm1_16:
4915 case Match_InvalidImm1_32:
4916 case Match_InvalidImm1_64:
4917 case Match_InvalidSVEAddSubImm8:
4918 case Match_InvalidSVEAddSubImm16:
4919 case Match_InvalidSVEAddSubImm32:
4920 case Match_InvalidSVEAddSubImm64:
4921 case Match_InvalidSVECpyImm8:
4922 case Match_InvalidSVECpyImm16:
4923 case Match_InvalidSVECpyImm32:
4924 case Match_InvalidSVECpyImm64:
4925 case Match_InvalidIndexRange1_1:
4926 case Match_InvalidIndexRange0_15:
4927 case Match_InvalidIndexRange0_7:
4928 case Match_InvalidIndexRange0_3:
4929 case Match_InvalidIndexRange0_1:
4930 case Match_InvalidSVEIndexRange0_63:
4931 case Match_InvalidSVEIndexRange0_31:
4932 case Match_InvalidSVEIndexRange0_15:
4933 case Match_InvalidSVEIndexRange0_7:
4934 case Match_InvalidSVEIndexRange0_3:
4935 case Match_InvalidLabel:
4936 case Match_InvalidComplexRotationEven:
4937 case Match_InvalidComplexRotationOdd:
4938 case Match_InvalidGPR64shifted8:
4939 case Match_InvalidGPR64shifted16:
4940 case Match_InvalidGPR64shifted32:
4941 case Match_InvalidGPR64shifted64:
4942 case Match_InvalidGPR64NoXZRshifted8:
4943 case Match_InvalidGPR64NoXZRshifted16:
4944 case Match_InvalidGPR64NoXZRshifted32:
4945 case Match_InvalidGPR64NoXZRshifted64:
4946 case Match_InvalidZPR32UXTW8:
4947 case Match_InvalidZPR32UXTW16:
4948 case Match_InvalidZPR32UXTW32:
4949 case Match_InvalidZPR32UXTW64:
4950 case Match_InvalidZPR32SXTW8:
4951 case Match_InvalidZPR32SXTW16:
4952 case Match_InvalidZPR32SXTW32:
4953 case Match_InvalidZPR32SXTW64:
4954 case Match_InvalidZPR64UXTW8:
4955 case Match_InvalidZPR64SXTW8:
4956 case Match_InvalidZPR64UXTW16:
4957 case Match_InvalidZPR64SXTW16:
4958 case Match_InvalidZPR64UXTW32:
4959 case Match_InvalidZPR64SXTW32:
4960 case Match_InvalidZPR64UXTW64:
4961 case Match_InvalidZPR64SXTW64:
4962 case Match_InvalidZPR32LSL8:
4963 case Match_InvalidZPR32LSL16:
4964 case Match_InvalidZPR32LSL32:
4965 case Match_InvalidZPR32LSL64:
4966 case Match_InvalidZPR64LSL8:
4967 case Match_InvalidZPR64LSL16:
4968 case Match_InvalidZPR64LSL32:
4969 case Match_InvalidZPR64LSL64:
4970 case Match_InvalidZPR0:
4971 case Match_InvalidZPR8:
4972 case Match_InvalidZPR16:
4973 case Match_InvalidZPR32:
4974 case Match_InvalidZPR64:
4975 case Match_InvalidZPR128:
4976 case Match_InvalidZPR_3b8:
4977 case Match_InvalidZPR_3b16:
4978 case Match_InvalidZPR_3b32:
4979 case Match_InvalidZPR_4b16:
4980 case Match_InvalidZPR_4b32:
4981 case Match_InvalidZPR_4b64:
4982 case Match_InvalidSVEPredicateAnyReg:
4983 case Match_InvalidSVEPattern:
4984 case Match_InvalidSVEPredicateBReg:
4985 case Match_InvalidSVEPredicateHReg:
4986 case Match_InvalidSVEPredicateSReg:
4987 case Match_InvalidSVEPredicateDReg:
4988 case Match_InvalidSVEPredicate3bAnyReg:
4989 case Match_InvalidSVEPredicate3bBReg:
4990 case Match_InvalidSVEPredicate3bHReg:
4991 case Match_InvalidSVEPredicate3bSReg:
4992 case Match_InvalidSVEPredicate3bDReg:
4993 case Match_InvalidSVEExactFPImmOperandHalfOne:
4994 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4995 case Match_InvalidSVEExactFPImmOperandZeroOne:
4996 case Match_MSR:
4997 case Match_MRS: {
4998 if (ErrorInfo >= Operands.size())
4999 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5000 // Any time we get here, there's nothing fancy to do. Just get the
5001 // operand SMLoc and display the diagnostic.
5002 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5003 if (ErrorLoc == SMLoc())
5004 ErrorLoc = IDLoc;
5005 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5006 }
5007 }
5008
5009 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5009)
;
5010}
5011
5012/// ParseDirective parses the arm specific directives
5013bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5014 const MCObjectFileInfo::Environment Format =
5015 getContext().getObjectFileInfo()->getObjectFileType();
5016 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5017
5018 StringRef IDVal = DirectiveID.getIdentifier();
5019 SMLoc Loc = DirectiveID.getLoc();
5020 if (IDVal == ".arch")
5021 parseDirectiveArch(Loc);
5022 else if (IDVal == ".cpu")
5023 parseDirectiveCPU(Loc);
5024 else if (IDVal == ".tlsdesccall")
5025 parseDirectiveTLSDescCall(Loc);
5026 else if (IDVal == ".ltorg" || IDVal == ".pool")
5027 parseDirectiveLtorg(Loc);
5028 else if (IDVal == ".unreq")
5029 parseDirectiveUnreq(Loc);
5030 else if (IDVal == ".inst")
5031 parseDirectiveInst(Loc);
5032 else if (IDVal == ".cfi_negate_ra_state")
5033 parseDirectiveCFINegateRAState();
5034 else if (IDVal == ".cfi_b_key_frame")
5035 parseDirectiveCFIBKeyFrame();
5036 else if (IsMachO) {
5037 if (IDVal == MCLOHDirectiveName())
5038 parseDirectiveLOH(IDVal, Loc);
5039 else
5040 return true;
5041 } else
5042 return true;
5043 return false;
5044}
5045
5046static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5047 SmallVector<StringRef, 4> &RequestedExtensions) {
5048 const bool NoCrypto =
5049 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5050 "nocrypto") != std::end(RequestedExtensions));
5051 const bool Crypto =
5052 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5053 "crypto") != std::end(RequestedExtensions));
5054
5055 if (!NoCrypto && Crypto) {
5056 switch (ArchKind) {
5057 default:
5058 // Map 'generic' (and others) to sha2 and aes, because
5059 // that was the traditional meaning of crypto.
5060 case AArch64::ArchKind::ARMV8_1A:
5061 case AArch64::ArchKind::ARMV8_2A:
5062 case AArch64::ArchKind::ARMV8_3A:
5063 RequestedExtensions.push_back("sha2");
5064 RequestedExtensions.push_back("aes");
5065 break;
5066 case AArch64::ArchKind::ARMV8_4A:
5067 case AArch64::ArchKind::ARMV8_5A:
5068 RequestedExtensions.push_back("sm4");
5069 RequestedExtensions.push_back("sha3");
5070 RequestedExtensions.push_back("sha2");
5071 RequestedExtensions.push_back("aes");
5072 break;
5073 }
5074 } else if (NoCrypto) {
5075 switch (ArchKind) {
5076 default:
5077 // Map 'generic' (and others) to sha2 and aes, because
5078 // that was the traditional meaning of crypto.
5079 case AArch64::ArchKind::ARMV8_1A:
5080 case AArch64::ArchKind::ARMV8_2A:
5081 case AArch64::ArchKind::ARMV8_3A:
5082 RequestedExtensions.push_back("nosha2");
5083 RequestedExtensions.push_back("noaes");
5084 break;
5085 case AArch64::ArchKind::ARMV8_4A:
5086 case AArch64::ArchKind::ARMV8_5A:
5087 RequestedExtensions.push_back("nosm4");
5088 RequestedExtensions.push_back("nosha3");
5089 RequestedExtensions.push_back("nosha2");
5090 RequestedExtensions.push_back("noaes");
5091 break;
5092 }
5093 }
5094}
5095
5096/// parseDirectiveArch
5097/// ::= .arch token
5098bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5099 SMLoc ArchLoc = getLoc();
5100
5101 StringRef Arch, ExtensionString;
5102 std::tie(Arch, ExtensionString) =
5103 getParser().parseStringToEndOfStatement().trim().split('+');
5104
5105 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5106 if (ID == AArch64::ArchKind::INVALID)
5107 return Error(ArchLoc, "unknown arch name");
5108
5109 if (parseToken(AsmToken::EndOfStatement))
5110 return true;
5111
5112 // Get the architecture and extension features.
5113 std::vector<StringRef> AArch64Features;
5114 AArch64::getArchFeatures(ID, AArch64Features);
5115 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5116 AArch64Features);
5117
5118 MCSubtargetInfo &STI = copySTI();
5119 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5120 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5121
5122 SmallVector<StringRef, 4> RequestedExtensions;
5123 if (!ExtensionString.empty())
5124 ExtensionString.split(RequestedExtensions, '+');
5125
5126 ExpandCryptoAEK(ID, RequestedExtensions);
5127
5128 FeatureBitset Features = STI.getFeatureBits();
5129 for (auto Name : RequestedExtensions) {
5130 bool EnableFeature = true;
5131
5132 if (Name.startswith_lower("no")) {
5133 EnableFeature = false;
5134 Name = Name.substr(2);
5135 }
5136
5137 for (const auto &Extension : ExtensionMap) {
5138 if (Extension.Name != Name)
5139 continue;
5140
5141 if (Extension.Features.none())
5142 report_fatal_error("unsupported architectural extension: " + Name);
5143
5144 FeatureBitset ToggleFeatures = EnableFeature
5145 ? (~Features & Extension.Features)
5146 : ( Features & Extension.Features);
5147 uint64_t Features =
5148 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5149 setAvailableFeatures(Features);
5150 break;
5151 }
5152 }
5153 return false;
5154}
5155
5156static SMLoc incrementLoc(SMLoc L, int Offset) {
5157 return SMLoc::getFromPointer(L.getPointer() + Offset);
5158}
5159
5160/// parseDirectiveCPU
5161/// ::= .cpu id
5162bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5163 SMLoc CurLoc = getLoc();
5164
5165 StringRef CPU, ExtensionString;
5166 std::tie(CPU, ExtensionString) =
5167 getParser().parseStringToEndOfStatement().trim().split('+');
5168
5169 if (parseToken(AsmToken::EndOfStatement))
5170 return true;
5171
5172 SmallVector<StringRef, 4> RequestedExtensions;
5173 if (!ExtensionString.empty())
5174 ExtensionString.split(RequestedExtensions, '+');
5175
5176 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5177 // once that is tablegen'ed
5178 if (!getSTI().isCPUStringValid(CPU)) {
5179 Error(CurLoc, "unknown CPU name");
5180 return false;
5181 }
5182
5183 MCSubtargetInfo &STI = copySTI();
5184 STI.setDefaultFeatures(CPU, "");
5185 CurLoc = incrementLoc(CurLoc, CPU.size());
5186
5187 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5188
5189 FeatureBitset Features = STI.getFeatureBits();
5190 for (auto Name : RequestedExtensions) {
5191 // Advance source location past '+'.
5192 CurLoc = incrementLoc(CurLoc, 1);
5193
5194 bool EnableFeature = true;
5195
5196 if (Name.startswith_lower("no")) {
5197 EnableFeature = false;
5198 Name = Name.substr(2);
5199 }
5200
5201 bool FoundExtension = false;
5202 for (const auto &Extension : ExtensionMap) {
5203 if (Extension.Name != Name)
5204 continue;
5205
5206 if (Extension.Features.none())
5207 report_fatal_error("unsupported architectural extension: " + Name);
5208
5209 FeatureBitset ToggleFeatures = EnableFeature
5210 ? (~Features & Extension.Features)
5211 : ( Features & Extension.Features);
5212 uint64_t Features =
5213 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5214 setAvailableFeatures(Features);
5215 FoundExtension = true;
5216
5217 break;
5218 }
5219
5220 if (!FoundExtension)
5221 Error(CurLoc, "unsupported architectural extension");
5222
5223 CurLoc = incrementLoc(CurLoc, Name.size());
5224 }
5225 return false;
5226}
5227
5228/// parseDirectiveInst
5229/// ::= .inst opcode [, ...]
5230bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5231 if (getLexer().is(AsmToken::EndOfStatement))
5232 return Error(Loc, "expected expression following '.inst' directive");
5233
5234 auto parseOp = [&]() -> bool {
5235 SMLoc L = getLoc();
5236 const MCExpr *Expr;
5237 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5238 return true;
5239 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5240 if (check(!Value, L, "expected constant expression"))
5241 return true;
5242 getTargetStreamer().emitInst(Value->getValue());
5243 return false;
5244 };
5245
5246 if (parseMany(parseOp))
5247 return addErrorSuffix(" in '.inst' directive");
5248 return false;
5249}
5250
5251// parseDirectiveTLSDescCall:
5252// ::= .tlsdesccall symbol
5253bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5254 StringRef Name;
5255 if (check(getParser().parseIdentifier(Name), L,
5256 "expected symbol after directive") ||
5257 parseToken(AsmToken::EndOfStatement))
5258 return true;
5259
5260 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5261 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5262 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5263
5264 MCInst Inst;
5265 Inst.setOpcode(AArch64::TLSDESCCALL);
5266 Inst.addOperand(MCOperand::createExpr(Expr));
5267
5268 getParser().getStreamer().EmitInstruction(Inst, getSTI());
5269 return false;
5270}
5271
5272/// ::= .loh <lohName | lohId> label1, ..., labelN
5273/// The number of arguments depends on the loh identifier.
5274bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5275 MCLOHType Kind;
5276 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5277 if (getParser().getTok().isNot(AsmToken::Integer))
5278 return TokError("expected an identifier or a number in directive");
5279 // We successfully get a numeric value for the identifier.
5280 // Check if it is valid.
5281 int64_t Id = getParser().getTok().getIntVal();
5282 if (Id <= -1U && !isValidMCLOHType(Id))
5283 return TokError("invalid numeric identifier in directive");
5284 Kind = (MCLOHType)Id;
5285 } else {
5286 StringRef Name = getTok().getIdentifier();
5287 // We successfully parse an identifier.
5288 // Check if it is a recognized one.
5289 int Id = MCLOHNameToId(Name);
5290
5291 if (Id == -1)
5292 return TokError("invalid identifier in directive");
5293 Kind = (MCLOHType)Id;
5294 }
5295 // Consume the identifier.
5296 Lex();
5297 // Get the number of arguments of this LOH.
5298 int NbArgs = MCLOHIdToNbArgs(Kind);
5299
5300 assert(NbArgs != -1 && "Invalid number of arguments")((NbArgs != -1 && "Invalid number of arguments") ? static_cast
<void> (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\""
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5300, __PRETTY_FUNCTION__))
;
5301
5302 SmallVector<MCSymbol *, 3> Args;
5303 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5304 StringRef Name;
5305 if (getParser().parseIdentifier(Name))
5306 return TokError("expected identifier in directive");
5307 Args.push_back(getContext().getOrCreateSymbol(Name));
5308
5309 if (Idx + 1 == NbArgs)
5310 break;
5311 if (parseToken(AsmToken::Comma,
5312 "unexpected token in '" + Twine(IDVal) + "' directive"))
5313 return true;
5314 }
5315 if (parseToken(AsmToken::EndOfStatement,
5316 "unexpected token in '" + Twine(IDVal) + "' directive"))
5317 return true;
5318
5319 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5320 return false;
5321}
5322
5323/// parseDirectiveLtorg
5324/// ::= .ltorg | .pool
5325bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5326 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5327 return true;
5328 getTargetStreamer().emitCurrentConstantPool();
5329 return false;
5330}
5331
5332/// parseDirectiveReq
5333/// ::= name .req registername
5334bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5335 MCAsmParser &Parser = getParser();
5336 Parser.Lex(); // Eat the '.req' token.
5337 SMLoc SRegLoc = getLoc();
5338 RegKind RegisterKind = RegKind::Scalar;
5339 unsigned RegNum;
5340 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5341
5342 if (ParseRes != MatchOperand_Success) {
5343 StringRef Kind;
5344 RegisterKind = RegKind::NeonVector;
5345 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5346
5347 if (ParseRes == MatchOperand_ParseFail)
5348 return true;
5349
5350 if (ParseRes == MatchOperand_Success && !Kind.empty())
5351 return Error(SRegLoc, "vector register without type specifier expected");
5352 }
5353
5354 if (ParseRes != MatchOperand_Success) {
5355 StringRef Kind;
5356 RegisterKind = RegKind::SVEDataVector;
5357 ParseRes =
5358 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5359
5360 if (ParseRes == MatchOperand_ParseFail)
5361 return true;
5362
5363 if (ParseRes == MatchOperand_Success && !Kind.empty())
5364 return Error(SRegLoc,
5365 "sve vector register without type specifier expected");
5366 }
5367
5368 if (ParseRes != MatchOperand_Success) {
5369 StringRef Kind;
5370 RegisterKind = RegKind::SVEPredicateVector;
5371 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5372
5373 if (ParseRes == MatchOperand_ParseFail)
5374 return true;
5375
5376 if (ParseRes == MatchOperand_Success && !Kind.empty())
5377 return Error(SRegLoc,
5378 "sve predicate register without type specifier expected");
5379 }
5380
5381 if (ParseRes != MatchOperand_Success)
5382 return Error(SRegLoc, "register name or alias expected");
5383
5384 // Shouldn't be anything else.
5385 if (parseToken(AsmToken::EndOfStatement,
5386 "unexpected input in .req directive"))
5387 return true;
5388
5389 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5390 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5391 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5392
5393 return false;
5394}
5395
5396/// parseDirectiveUneq
5397/// ::= .unreq registername
5398bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5399 MCAsmParser &Parser = getParser();
5400 if (getTok().isNot(AsmToken::Identifier))
5401 return TokError("unexpected input in .unreq directive.");
5402 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5403 Parser.Lex(); // Eat the identifier.
5404 if (parseToken(AsmToken::EndOfStatement))
5405 return addErrorSuffix("in '.unreq' directive");
5406 return false;
5407}
5408
5409bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5410 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5411 return true;
5412 getStreamer().EmitCFINegateRAState();
5413 return false;
5414}
5415
5416/// parseDirectiveCFIBKeyFrame
5417/// ::= .cfi_b_key
5418bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5419 if (parseToken(AsmToken::EndOfStatement,
5420 "unexpected token in '.cfi_b_key_frame'"))
5421 return true;
5422 getStreamer().EmitCFIBKeyFrame();
5423 return false;
5424}
5425
5426bool
5427AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5428 AArch64MCExpr::VariantKind &ELFRefKind,
5429 MCSymbolRefExpr::VariantKind &DarwinRefKind,
5430 int64_t &Addend) {
5431 ELFRefKind = AArch64MCExpr::VK_INVALID;
5432 DarwinRefKind = MCSymbolRefExpr::VK_None;
5433 Addend = 0;
5434
5435 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5436 ELFRefKind = AE->getKind();
5437 Expr = AE->getSubExpr();
5438 }
5439
5440 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5441 if (SE) {
5442 // It's a simple symbol reference with no addend.
5443 DarwinRefKind = SE->getKind();
5444 return true;
5445 }
5446
5447 // Check that it looks like a symbol + an addend
5448 MCValue Res;
5449 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
5450 if (!Relocatable || Res.getSymB())
5451 return false;
5452
5453 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
5454 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
5455 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
5456 return false;
5457
5458 if (Res.getSymA())
5459 DarwinRefKind = Res.getSymA()->getKind();
5460 Addend = Res.getConstant();
5461
5462 // It's some symbol reference + a constant addend, but really
5463 // shouldn't use both Darwin and ELF syntax.
5464 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5465 DarwinRefKind == MCSymbolRefExpr::VK_None;
5466}
5467
5468/// Force static initialization.
5469extern "C" void LLVMInitializeAArch64AsmParser() {
5470 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5471 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5472 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5473}
5474
5475#define GET_REGISTER_MATCHER
5476#define GET_SUBTARGET_FEATURE_NAME
5477#define GET_MATCHER_IMPLEMENTATION
5478#define GET_MNEMONIC_SPELL_CHECKER
5479#include "AArch64GenAsmMatcher.inc"
5480
5481// Define this matcher function after the auto-generated include so we
5482// have the match class enum definitions.
5483unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5484 unsigned Kind) {
5485 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5486 // If the kind is a token for a literal immediate, check if our asm
5487 // operand matches. This is for InstAliases which have a fixed-value
5488 // immediate in the syntax.
5489 int64_t ExpectedVal;
5490 switch (Kind) {
5491 default:
5492 return Match_InvalidOperand;
5493 case MCK__35_0:
5494 ExpectedVal = 0;
5495 break;
5496 case MCK__35_1:
5497 ExpectedVal = 1;
5498 break;
5499 case MCK__35_12:
5500 ExpectedVal = 12;
5501 break;
5502 case MCK__35_16:
5503 ExpectedVal = 16;
5504 break;
5505 case MCK__35_2:
5506 ExpectedVal = 2;
5507 break;
5508 case MCK__35_24:
5509 ExpectedVal = 24;
5510 break;
5511 case MCK__35_3:
5512 ExpectedVal = 3;
5513 break;
5514 case MCK__35_32:
5515 ExpectedVal = 32;
5516 break;
5517 case MCK__35_4:
5518 ExpectedVal = 4;
5519 break;
5520 case MCK__35_48:
5521 ExpectedVal = 48;
5522 break;
5523 case MCK__35_6:
5524 ExpectedVal = 6;
5525 break;
5526 case MCK__35_64:
5527 ExpectedVal = 64;
5528 break;
5529 case MCK__35_8:
5530 ExpectedVal = 8;
5531 break;
5532 }
5533 if (!Op.isImm())
5534 return Match_InvalidOperand;
5535 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5536 if (!CE)
5537 return Match_InvalidOperand;
5538 if (CE->getValue() == ExpectedVal)
5539 return Match_Success;
5540 return Match_InvalidOperand;
5541}
5542
5543OperandMatchResultTy
5544AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5545
5546 SMLoc S = getLoc();
5547
5548 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5549 Error(S, "expected register");
5550 return MatchOperand_ParseFail;
5551 }
5552
5553 unsigned FirstReg;
5554 OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5555 if (Res != MatchOperand_Success)
5556 return MatchOperand_ParseFail;
5557
5558 const MCRegisterClass &WRegClass =
5559 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5560 const MCRegisterClass &XRegClass =
5561 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5562
5563 bool isXReg = XRegClass.contains(FirstReg),
5564 isWReg = WRegClass.contains(FirstReg);
5565 if (!isXReg && !isWReg) {
5566 Error(S, "expected first even register of a "
5567 "consecutive same-size even/odd register pair");
5568 return MatchOperand_ParseFail;
5569 }
5570
5571 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5572 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5573
5574 if (FirstEncoding & 0x1) {
5575 Error(S, "expected first even register of a "
5576 "consecutive same-size even/odd register pair");
5577 return MatchOperand_ParseFail;
5578 }
5579
5580 if (getParser().getTok().isNot(AsmToken::Comma)) {
5581 Error(getLoc(), "expected comma");
5582 return MatchOperand_ParseFail;
5583 }
5584 // Eat the comma
5585 getParser().Lex();
5586
5587 SMLoc E = getLoc();
5588 unsigned SecondReg;
5589 Res = tryParseScalarRegister(SecondReg);
5590 if (Res != MatchOperand_Success)
5591 return MatchOperand_ParseFail;
5592
5593 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5594 (isXReg && !XRegClass.contains(SecondReg)) ||
5595 (isWReg && !WRegClass.contains(SecondReg))) {
5596 Error(E,"expected second odd register of a "
5597 "consecutive same-size even/odd register pair");
5598 return MatchOperand_ParseFail;
5599 }
5600
5601 unsigned Pair = 0;
5602 if (isXReg) {
5603 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5604 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5605 } else {
5606 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5607 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5608 }
5609
5610 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5611 getLoc(), getContext()));
5612
5613 return MatchOperand_Success;
5614}
5615
5616template <bool ParseShiftExtend, bool ParseSuffix>
5617OperandMatchResultTy
5618AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5619 const SMLoc S = getLoc();
5620 // Check for a SVE vector register specifier first.
5621 unsigned RegNum;
5622 StringRef Kind;
5623
5624 OperandMatchResultTy Res =
5625 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5626
5627 if (Res != MatchOperand_Success)
5628 return Res;
5629
5630 if (ParseSuffix && Kind.empty())
5631 return MatchOperand_NoMatch;
5632
5633 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5634 if (!KindRes)
5635 return MatchOperand_NoMatch;
5636
5637 unsigned ElementWidth = KindRes->second;
5638
5639 // No shift/extend is the default.
5640 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5641 Operands.push_back(AArch64Operand::CreateVectorReg(
5642 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5643
5644 OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5645 if (Res == MatchOperand_ParseFail)
5646 return MatchOperand_ParseFail;
5647 return MatchOperand_Success;
5648 }
5649
5650 // Eat the comma
5651 getParser().Lex();
5652
5653 // Match the shift
5654 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5655 Res = tryParseOptionalShiftExtend(ExtOpnd);
5656 if (Res != MatchOperand_Success)
5657 return Res;
5658
5659 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5660 Operands.push_back(AArch64Operand::CreateVectorReg(
5661 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5662 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5663 Ext->hasShiftExtendAmount()));
5664
5665 return MatchOperand_Success;
5666}
5667
5668OperandMatchResultTy
5669AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5670 MCAsmParser &Parser = getParser();
5671
5672 SMLoc SS = getLoc();
5673 const AsmToken &TokE = Parser.getTok();
5674 bool IsHash = TokE.is(AsmToken::Hash);
5675
5676 if (!IsHash && TokE.isNot(AsmToken::Identifier))
5677 return MatchOperand_NoMatch;
5678
5679 int64_t Pattern;
5680 if (IsHash) {
5681 Parser.Lex(); // Eat hash
5682
5683 // Parse the immediate operand.
5684 const MCExpr *ImmVal;
5685 SS = getLoc();
5686 if (Parser.parseExpression(ImmVal))
5687 return MatchOperand_ParseFail;
5688
5689 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5690 if (!MCE)
5691 return MatchOperand_ParseFail;
5692
5693 Pattern = MCE->getValue();
5694 } else {
5695 // Parse the pattern
5696 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5697 if (!Pat)
5698 return MatchOperand_NoMatch;
5699
5700 Parser.Lex();
5701 Pattern = Pat->Encoding;
5702 assert(Pattern >= 0 && Pattern < 32)((Pattern >= 0 && Pattern < 32) ? static_cast<
void> (0) : __assert_fail ("Pattern >= 0 && Pattern < 32"
, "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5702, __PRETTY_FUNCTION__))
;
5703 }
5704
5705 Operands.push_back(
5706 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5707 SS, getLoc(), getContext()));
5708
5709 return MatchOperand_Success;
5710}