Bug Summary

File: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 2538, column 7
The 1st argument of the function call is an uninitialized value.

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/.. 
-U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/AArch64/AsmParser -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp -faddrsig
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "AArch64InstrInfo.h"
15#include "Utils/AArch64BaseInfo.h"
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/MC/MCContext.h"
27#include "llvm/MC/MCExpr.h"
28#include "llvm/MC/MCInst.h"
29#include "llvm/MC/MCLinkerOptimizationHint.h"
30#include "llvm/MC/MCObjectFileInfo.h"
31#include "llvm/MC/MCParser/MCAsmLexer.h"
32#include "llvm/MC/MCParser/MCAsmParser.h"
33#include "llvm/MC/MCParser/MCAsmParserExtension.h"
34#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35#include "llvm/MC/MCParser/MCTargetAsmParser.h"
36#include "llvm/MC/MCRegisterInfo.h"
37#include "llvm/MC/MCStreamer.h"
38#include "llvm/MC/MCSubtargetInfo.h"
39#include "llvm/MC/MCSymbol.h"
40#include "llvm/MC/MCTargetOptions.h"
41#include "llvm/MC/SubtargetFeature.h"
42#include "llvm/MC/MCValue.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/Compiler.h"
45#include "llvm/Support/ErrorHandling.h"
46#include "llvm/Support/MathExtras.h"
47#include "llvm/Support/SMLoc.h"
48#include "llvm/Support/TargetParser.h"
49#include "llvm/Support/TargetRegistry.h"
50#include "llvm/Support/raw_ostream.h"
51#include <cassert>
52#include <cctype>
53#include <cstdint>
54#include <cstdio>
55#include <string>
56#include <tuple>
57#include <utility>
58#include <vector>
59
60using namespace llvm;
61
62namespace {
63
/// The broad classes of registers the AArch64 assembly parser distinguishes
/// while matching register names.
enum class RegKind {
  Scalar,             ///< Scalar register (non-vector).
  NeonVector,         ///< NEON (Advanced SIMD) vector register.
  SVEDataVector,      ///< SVE data vector register.
  SVEPredicateVector  ///< SVE predicate register.
};
70
/// How a parsed register may satisfy a register-class constraint: as the
/// register itself, or via its super- or sub-register (used e.g. by the
/// tryParseGPROperand template below).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
76
77class AArch64AsmParser : public MCTargetAsmParser {
78private:
79 StringRef Mnemonic; ///< Instruction mnemonic.
80
81 // Map of register aliases registers via the .req directive.
82 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
83
84 class PrefixInfo {
85 public:
86 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
87 PrefixInfo Prefix;
88 switch (Inst.getOpcode()) {
89 case AArch64::MOVPRFX_ZZ:
90 Prefix.Active = true;
91 Prefix.Dst = Inst.getOperand(0).getReg();
92 break;
93 case AArch64::MOVPRFX_ZPmZ_B:
94 case AArch64::MOVPRFX_ZPmZ_H:
95 case AArch64::MOVPRFX_ZPmZ_S:
96 case AArch64::MOVPRFX_ZPmZ_D:
97 Prefix.Active = true;
98 Prefix.Predicated = true;
99 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
100 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 101, __PRETTY_FUNCTION__))
101 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 101, __PRETTY_FUNCTION__))
;
102 Prefix.Dst = Inst.getOperand(0).getReg();
103 Prefix.Pg = Inst.getOperand(2).getReg();
104 break;
105 case AArch64::MOVPRFX_ZPzZ_B:
106 case AArch64::MOVPRFX_ZPzZ_H:
107 case AArch64::MOVPRFX_ZPzZ_S:
108 case AArch64::MOVPRFX_ZPzZ_D:
109 Prefix.Active = true;
110 Prefix.Predicated = true;
111 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
112 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 113, __PRETTY_FUNCTION__))
113 "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx"
) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 113, __PRETTY_FUNCTION__))
;
114 Prefix.Dst = Inst.getOperand(0).getReg();
115 Prefix.Pg = Inst.getOperand(1).getReg();
116 break;
117 default:
118 break;
119 }
120
121 return Prefix;
122 }
123
124 PrefixInfo() : Active(false), Predicated(false) {}
125 bool isActive() const { return Active; }
126 bool isPredicated() const { return Predicated; }
127 unsigned getElementSize() const {
128 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 128, __PRETTY_FUNCTION__))
;
129 return ElementSize;
130 }
131 unsigned getDstReg() const { return Dst; }
132 unsigned getPgReg() const {
133 assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail (
"Predicated", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 133, __PRETTY_FUNCTION__))
;
134 return Pg;
135 }
136
137 private:
138 bool Active;
139 bool Predicated;
140 unsigned ElementSize;
141 unsigned Dst;
142 unsigned Pg;
143 } NextPrefix;
144
145 AArch64TargetStreamer &getTargetStreamer() {
146 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
147 return static_cast<AArch64TargetStreamer &>(TS);
148 }
149
150 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
151
152 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
153 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
154 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
155 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
156 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
157 bool parseRegister(OperandVector &Operands);
158 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
159 bool parseNeonVectorList(OperandVector &Operands);
160 bool parseOptionalMulOperand(OperandVector &Operands);
161 bool parseOperand(OperandVector &Operands, bool isCondCode,
162 bool invertCondCode);
163
164 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
165 OperandVector &Operands);
166
167 bool parseDirectiveArch(SMLoc L);
168 bool parseDirectiveCPU(SMLoc L);
169 bool parseDirectiveInst(SMLoc L);
170
171 bool parseDirectiveTLSDescCall(SMLoc L);
172
173 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
174 bool parseDirectiveLtorg(SMLoc L);
175
176 bool parseDirectiveReq(StringRef Name, SMLoc L);
177 bool parseDirectiveUnreq(SMLoc L);
178
179 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
180 SmallVectorImpl<SMLoc> &Loc);
181 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
182 OperandVector &Operands, MCStreamer &Out,
183 uint64_t &ErrorInfo,
184 bool MatchingInlineAsm) override;
185/// @name Auto-generated Match Functions
186/// {
187
188#define GET_ASSEMBLER_HEADER
189#include "AArch64GenAsmMatcher.inc"
190
191 /// }
192
193 OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
194 OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
195 RegKind MatchKind);
196 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
197 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
198 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
199 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
200 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
201 template <bool IsSVEPrefetch = false>
202 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
203 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
204 OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
205 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
206 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
207 template<bool AddFPZeroAsLiteral>
208 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
209 OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
210 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
211 bool tryParseNeonVectorRegister(OperandVector &Operands);
212 OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
213 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
214 template <bool ParseShiftExtend,
215 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
216 OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
217 template <bool ParseShiftExtend, bool ParseSuffix>
218 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
219 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
220 template <RegKind VectorKind>
221 OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
222 bool ExpectMatch = false);
223 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
224
225public:
226 enum AArch64MatchResultTy {
227 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
228#define GET_OPERAND_DIAGNOSTIC_TYPES
229#include "AArch64GenAsmMatcher.inc"
230 };
231 bool IsILP32;
232
233 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
234 const MCInstrInfo &MII, const MCTargetOptions &Options)
235 : MCTargetAsmParser(Options, STI, MII) {
236 IsILP32 = Options.getABIName() == "ilp32";
237 MCAsmParserExtension::Initialize(Parser);
238 MCStreamer &S = getParser().getStreamer();
239 if (S.getTargetStreamer() == nullptr)
240 new AArch64TargetStreamer(S);
241
242 // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
243 // directives as they have the same form and semantics:
244 /// ::= (.hword | .word | .xword ) [ expression (, expression)* ]
245 Parser.addAliasForDirective(".hword", ".2byte");
246 Parser.addAliasForDirective(".word", ".4byte");
247 Parser.addAliasForDirective(".xword", ".8byte");
248
249 // Initialize the set of available features.
250 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
251 }
252
253 bool regsEqual(const MCParsedAsmOperand &Op1,
254 const MCParsedAsmOperand &Op2) const override;
255 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
256 SMLoc NameLoc, OperandVector &Operands) override;
257 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
258 bool ParseDirective(AsmToken DirectiveID) override;
259 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
260 unsigned Kind) override;
261
262 static bool classifySymbolRef(const MCExpr *Expr,
263 AArch64MCExpr::VariantKind &ELFRefKind,
264 MCSymbolRefExpr::VariantKind &DarwinRefKind,
265 int64_t &Addend);
266};
267
268/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
269/// instruction.
270class AArch64Operand : public MCParsedAsmOperand {
271private:
272 enum KindTy {
273 k_Immediate,
274 k_ShiftedImm,
275 k_CondCode,
276 k_Register,
277 k_VectorList,
278 k_VectorIndex,
279 k_Token,
280 k_SysReg,
281 k_SysCR,
282 k_Prefetch,
283 k_ShiftExtend,
284 k_FPImm,
285 k_Barrier,
286 k_PSBHint,
287 k_BTIHint,
288 } Kind;
289
290 SMLoc StartLoc, EndLoc;
291
292 struct TokOp {
293 const char *Data;
294 unsigned Length;
295 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
296 };
297
298 // Separate shift/extend operand.
299 struct ShiftExtendOp {
300 AArch64_AM::ShiftExtendType Type;
301 unsigned Amount;
302 bool HasExplicitAmount;
303 };
304
305 struct RegOp {
306 unsigned RegNum;
307 RegKind Kind;
308 int ElementWidth;
309
310 // The register may be allowed as a different register class,
311 // e.g. for GPR64as32 or GPR32as64.
312 RegConstraintEqualityTy EqualityTy;
313
314 // In some cases the shift/extend needs to be explicitly parsed together
315 // with the register, rather than as a separate operand. This is needed
316 // for addressing modes where the instruction as a whole dictates the
317 // scaling/extend, rather than specific bits in the instruction.
318 // By parsing them as a single operand, we avoid the need to pass an
319 // extra operand in all CodeGen patterns (because all operands need to
320 // have an associated value), and we avoid the need to update TableGen to
321 // accept operands that have no associated bits in the instruction.
322 //
323 // An added benefit of parsing them together is that the assembler
324 // can give a sensible diagnostic if the scaling is not correct.
325 //
326 // The default is 'lsl #0' (HasExplicitAmount = false) if no
327 // ShiftExtend is specified.
328 ShiftExtendOp ShiftExtend;
329 };
330
331 struct VectorListOp {
332 unsigned RegNum;
333 unsigned Count;
334 unsigned NumElements;
335 unsigned ElementWidth;
336 RegKind RegisterKind;
337 };
338
339 struct VectorIndexOp {
340 unsigned Val;
341 };
342
343 struct ImmOp {
344 const MCExpr *Val;
345 };
346
347 struct ShiftedImmOp {
348 const MCExpr *Val;
349 unsigned ShiftAmount;
350 };
351
352 struct CondCodeOp {
353 AArch64CC::CondCode Code;
354 };
355
356 struct FPImmOp {
357 uint64_t Val; // APFloat value bitcasted to uint64_t.
358 bool IsExact; // describes whether parsed value was exact.
359 };
360
361 struct BarrierOp {
362 const char *Data;
363 unsigned Length;
364 unsigned Val; // Not the enum since not all values have names.
365 };
366
367 struct SysRegOp {
368 const char *Data;
369 unsigned Length;
370 uint32_t MRSReg;
371 uint32_t MSRReg;
372 uint32_t PStateField;
373 };
374
375 struct SysCRImmOp {
376 unsigned Val;
377 };
378
379 struct PrefetchOp {
380 const char *Data;
381 unsigned Length;
382 unsigned Val;
383 };
384
385 struct PSBHintOp {
386 const char *Data;
387 unsigned Length;
388 unsigned Val;
389 };
390
391 struct BTIHintOp {
392 const char *Data;
393 unsigned Length;
394 unsigned Val;
395 };
396
397 struct ExtendOp {
398 unsigned Val;
399 };
400
401 union {
402 struct TokOp Tok;
403 struct RegOp Reg;
404 struct VectorListOp VectorList;
405 struct VectorIndexOp VectorIndex;
406 struct ImmOp Imm;
407 struct ShiftedImmOp ShiftedImm;
408 struct CondCodeOp CondCode;
409 struct FPImmOp FPImm;
410 struct BarrierOp Barrier;
411 struct SysRegOp SysReg;
412 struct SysCRImmOp SysCRImm;
413 struct PrefetchOp Prefetch;
414 struct PSBHintOp PSBHint;
415 struct BTIHintOp BTIHint;
416 struct ShiftExtendOp ShiftExtend;
417 };
418
419 // Keep the MCContext around as the MCExprs may need manipulated during
420 // the add<>Operands() calls.
421 MCContext &Ctx;
422
423public:
424 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
425
426 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
427 Kind = o.Kind;
428 StartLoc = o.StartLoc;
429 EndLoc = o.EndLoc;
430 switch (Kind) {
431 case k_Token:
432 Tok = o.Tok;
433 break;
434 case k_Immediate:
435 Imm = o.Imm;
436 break;
437 case k_ShiftedImm:
438 ShiftedImm = o.ShiftedImm;
439 break;
440 case k_CondCode:
441 CondCode = o.CondCode;
442 break;
443 case k_FPImm:
444 FPImm = o.FPImm;
445 break;
446 case k_Barrier:
447 Barrier = o.Barrier;
448 break;
449 case k_Register:
450 Reg = o.Reg;
451 break;
452 case k_VectorList:
453 VectorList = o.VectorList;
454 break;
455 case k_VectorIndex:
456 VectorIndex = o.VectorIndex;
457 break;
458 case k_SysReg:
459 SysReg = o.SysReg;
460 break;
461 case k_SysCR:
462 SysCRImm = o.SysCRImm;
463 break;
464 case k_Prefetch:
465 Prefetch = o.Prefetch;
466 break;
467 case k_PSBHint:
468 PSBHint = o.PSBHint;
469 break;
470 case k_BTIHint:
471 BTIHint = o.BTIHint;
472 break;
473 case k_ShiftExtend:
474 ShiftExtend = o.ShiftExtend;
475 break;
476 }
477 }
478
479 /// getStartLoc - Get the location of the first token of this operand.
480 SMLoc getStartLoc() const override { return StartLoc; }
481 /// getEndLoc - Get the location of the last token of this operand.
482 SMLoc getEndLoc() const override { return EndLoc; }
483
484 StringRef getToken() const {
485 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 485, __PRETTY_FUNCTION__))
;
486 return StringRef(Tok.Data, Tok.Length);
487 }
488
489 bool isTokenSuffix() const {
490 assert(Kind == k_Token && "Invalid access!")((Kind == k_Token && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 490, __PRETTY_FUNCTION__))
;
491 return Tok.IsSuffix;
492 }
493
494 const MCExpr *getImm() const {
495 assert(Kind == k_Immediate && "Invalid access!")((Kind == k_Immediate && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 495, __PRETTY_FUNCTION__))
;
496 return Imm.Val;
497 }
498
499 const MCExpr *getShiftedImmVal() const {
500 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 500, __PRETTY_FUNCTION__))
;
501 return ShiftedImm.Val;
502 }
503
504 unsigned getShiftedImmShift() const {
505 assert(Kind == k_ShiftedImm && "Invalid access!")((Kind == k_ShiftedImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 505, __PRETTY_FUNCTION__))
;
506 return ShiftedImm.ShiftAmount;
507 }
508
509 AArch64CC::CondCode getCondCode() const {
510 assert(Kind == k_CondCode && "Invalid access!")((Kind == k_CondCode && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 510, __PRETTY_FUNCTION__))
;
511 return CondCode.Code;
512 }
513
514 APFloat getFPImm() const {
515 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 515, __PRETTY_FUNCTION__))
;
516 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
517 }
518
519 bool getFPImmIsExact() const {
520 assert (Kind == k_FPImm && "Invalid access!")((Kind == k_FPImm && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 520, __PRETTY_FUNCTION__))
;
521 return FPImm.IsExact;
522 }
523
524 unsigned getBarrier() const {
525 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 525, __PRETTY_FUNCTION__))
;
526 return Barrier.Val;
527 }
528
529 StringRef getBarrierName() const {
530 assert(Kind == k_Barrier && "Invalid access!")((Kind == k_Barrier && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 530, __PRETTY_FUNCTION__))
;
531 return StringRef(Barrier.Data, Barrier.Length);
532 }
533
534 unsigned getReg() const override {
535 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 535, __PRETTY_FUNCTION__))
;
536 return Reg.RegNum;
537 }
538
539 RegConstraintEqualityTy getRegEqualityTy() const {
540 assert(Kind == k_Register && "Invalid access!")((Kind == k_Register && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 540, __PRETTY_FUNCTION__))
;
541 return Reg.EqualityTy;
542 }
543
544 unsigned getVectorListStart() const {
545 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 545, __PRETTY_FUNCTION__))
;
546 return VectorList.RegNum;
547 }
548
549 unsigned getVectorListCount() const {
550 assert(Kind == k_VectorList && "Invalid access!")((Kind == k_VectorList && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 550, __PRETTY_FUNCTION__))
;
551 return VectorList.Count;
552 }
553
554 unsigned getVectorIndex() const {
555 assert(Kind == k_VectorIndex && "Invalid access!")((Kind == k_VectorIndex && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 555, __PRETTY_FUNCTION__))
;
556 return VectorIndex.Val;
557 }
558
559 StringRef getSysReg() const {
560 assert(Kind == k_SysReg && "Invalid access!")((Kind == k_SysReg && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 560, __PRETTY_FUNCTION__))
;
561 return StringRef(SysReg.Data, SysReg.Length);
562 }
563
564 unsigned getSysCR() const {
565 assert(Kind == k_SysCR && "Invalid access!")((Kind == k_SysCR && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 565, __PRETTY_FUNCTION__))
;
566 return SysCRImm.Val;
567 }
568
569 unsigned getPrefetch() const {
570 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 570, __PRETTY_FUNCTION__))
;
571 return Prefetch.Val;
572 }
573
574 unsigned getPSBHint() const {
575 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 575, __PRETTY_FUNCTION__))
;
576 return PSBHint.Val;
577 }
578
579 StringRef getPSBHintName() const {
580 assert(Kind == k_PSBHint && "Invalid access!")((Kind == k_PSBHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 580, __PRETTY_FUNCTION__))
;
581 return StringRef(PSBHint.Data, PSBHint.Length);
582 }
583
584 unsigned getBTIHint() const {
585 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 585, __PRETTY_FUNCTION__))
;
586 return BTIHint.Val;
587 }
588
589 StringRef getBTIHintName() const {
590 assert(Kind == k_BTIHint && "Invalid access!")((Kind == k_BTIHint && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 590, __PRETTY_FUNCTION__))
;
591 return StringRef(BTIHint.Data, BTIHint.Length);
592 }
593
594 StringRef getPrefetchName() const {
595 assert(Kind == k_Prefetch && "Invalid access!")((Kind == k_Prefetch && "Invalid access!") ? static_cast
<void> (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 595, __PRETTY_FUNCTION__))
;
596 return StringRef(Prefetch.Data, Prefetch.Length);
597 }
598
599 AArch64_AM::ShiftExtendType getShiftExtendType() const {
600 if (Kind == k_ShiftExtend)
601 return ShiftExtend.Type;
602 if (Kind == k_Register)
603 return Reg.ShiftExtend.Type;
604 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 604)
;
605 }
606
607 unsigned getShiftExtendAmount() const {
608 if (Kind == k_ShiftExtend)
609 return ShiftExtend.Amount;
610 if (Kind == k_Register)
611 return Reg.ShiftExtend.Amount;
612 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 612)
;
613 }
614
615 bool hasShiftExtendAmount() const {
616 if (Kind == k_ShiftExtend)
617 return ShiftExtend.HasExplicitAmount;
618 if (Kind == k_Register)
619 return Reg.ShiftExtend.HasExplicitAmount;
620 llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 620)
;
621 }
622
623 bool isImm() const override { return Kind == k_Immediate; }
624 bool isMem() const override { return false; }
625
626 bool isUImm6() const {
627 if (!isImm())
628 return false;
629 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630 if (!MCE)
631 return false;
632 int64_t Val = MCE->getValue();
633 return (Val >= 0 && Val < 64);
634 }
635
636 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
637
638 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
639 return isImmScaled<Bits, Scale>(true);
640 }
641
642 template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
643 return isImmScaled<Bits, Scale>(false);
644 }
645
646 template <int Bits, int Scale>
647 DiagnosticPredicate isImmScaled(bool Signed) const {
648 if (!isImm())
649 return DiagnosticPredicateTy::NoMatch;
650
651 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 if (!MCE)
653 return DiagnosticPredicateTy::NoMatch;
654
655 int64_t MinVal, MaxVal;
656 if (Signed) {
657 int64_t Shift = Bits - 1;
658 MinVal = (int64_t(1) << Shift) * -Scale;
659 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
660 } else {
661 MinVal = 0;
662 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
663 }
664
665 int64_t Val = MCE->getValue();
666 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
667 return DiagnosticPredicateTy::Match;
668
669 return DiagnosticPredicateTy::NearMatch;
670 }
671
672 DiagnosticPredicate isSVEPattern() const {
673 if (!isImm())
674 return DiagnosticPredicateTy::NoMatch;
675 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
676 if (!MCE)
677 return DiagnosticPredicateTy::NoMatch;
678 int64_t Val = MCE->getValue();
679 if (Val >= 0 && Val < 32)
680 return DiagnosticPredicateTy::Match;
681 return DiagnosticPredicateTy::NearMatch;
682 }
683
684 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
685 AArch64MCExpr::VariantKind ELFRefKind;
686 MCSymbolRefExpr::VariantKind DarwinRefKind;
687 int64_t Addend;
688 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
689 Addend)) {
690 // If we don't understand the expression, assume the best and
691 // let the fixup and relocation code deal with it.
692 return true;
693 }
694
695 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
696 ELFRefKind == AArch64MCExpr::VK_LO12 ||
697 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
698 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
699 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
700 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
701 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
702 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
703 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
704 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
705 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
706 // Note that we don't range-check the addend. It's adjusted modulo page
707 // size when converted, so there is no "out of range" condition when using
708 // @pageoff.
709 return true;
710 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
711 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
712 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
713 return Addend == 0;
714 }
715
716 return false;
717 }
718
719 template <int Scale> bool isUImm12Offset() const {
720 if (!isImm())
721 return false;
722
723 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
724 if (!MCE)
725 return isSymbolicUImm12Offset(getImm());
726
727 int64_t Val = MCE->getValue();
728 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
729 }
730
731 template <int N, int M>
732 bool isImmInRange() const {
733 if (!isImm())
734 return false;
735 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
736 if (!MCE)
737 return false;
738 int64_t Val = MCE->getValue();
739 return (Val >= N && Val <= M);
740 }
741
742 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
743 // a logical immediate can always be represented when inverted.
744 template <typename T>
745 bool isLogicalImm() const {
746 if (!isImm())
747 return false;
748 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
749 if (!MCE)
750 return false;
751
752 int64_t Val = MCE->getValue();
753 int64_t SVal = typename std::make_signed<T>::type(Val);
754 int64_t UVal = typename std::make_unsigned<T>::type(Val);
755 if (Val != SVal && Val != UVal)
756 return false;
757
758 return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
759 }
760
761 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
762
763 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
764 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
765 /// immediate that can be shifted by 'Shift'.
766 template <unsigned Width>
767 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
768 if (isShiftedImm() && Width == getShiftedImmShift())
769 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
770 return std::make_pair(CE->getValue(), Width);
771
772 if (isImm())
773 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
774 int64_t Val = CE->getValue();
775 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
776 return std::make_pair(Val >> Width, Width);
777 else
778 return std::make_pair(Val, 0u);
779 }
780
781 return {};
782 }
783
784 bool isAddSubImm() const {
785 if (!isShiftedImm() && !isImm())
786 return false;
787
788 const MCExpr *Expr;
789
790 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
791 if (isShiftedImm()) {
792 unsigned Shift = ShiftedImm.ShiftAmount;
793 Expr = ShiftedImm.Val;
794 if (Shift != 0 && Shift != 12)
795 return false;
796 } else {
797 Expr = getImm();
798 }
799
800 AArch64MCExpr::VariantKind ELFRefKind;
801 MCSymbolRefExpr::VariantKind DarwinRefKind;
802 int64_t Addend;
803 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
804 DarwinRefKind, Addend)) {
805 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
806 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
807 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
808 || ELFRefKind == AArch64MCExpr::VK_LO12
809 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
810 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
811 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
812 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
813 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
814 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
815 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
816 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
817 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
818 }
819
820 // If it's a constant, it should be a real immediate in range.
821 if (auto ShiftedVal = getShiftedVal<12>())
822 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
823
824 // If it's an expression, we hope for the best and let the fixup/relocation
825 // code deal with it.
826 return true;
827 }
828
829 bool isAddSubImmNeg() const {
830 if (!isShiftedImm() && !isImm())
831 return false;
832
833 // Otherwise it should be a real negative immediate in range.
834 if (auto ShiftedVal = getShiftedVal<12>())
835 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
836
837 return false;
838 }
839
840 // Signed value in the range -128 to +127. For element widths of
841 // 16 bits or higher it may also be a signed multiple of 256 in the
842 // range -32768 to +32512.
843 // For element-width of 8 bits a range of -128 to 255 is accepted,
844 // since a copy of a byte can be either signed/unsigned.
845 template <typename T>
846 DiagnosticPredicate isSVECpyImm() const {
847 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
848 return DiagnosticPredicateTy::NoMatch;
849
850 bool IsByte =
851 std::is_same<int8_t, typename std::make_signed<T>::type>::value;
852 if (auto ShiftedImm = getShiftedVal<8>())
853 if (!(IsByte && ShiftedImm->second) &&
854 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
855 << ShiftedImm->second))
856 return DiagnosticPredicateTy::Match;
857
858 return DiagnosticPredicateTy::NearMatch;
859 }
860
861 // Unsigned value in the range 0 to 255. For element widths of
862 // 16 bits or higher it may also be a signed multiple of 256 in the
863 // range 0 to 65280.
864 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
865 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
866 return DiagnosticPredicateTy::NoMatch;
867
868 bool IsByte =
869 std::is_same<int8_t, typename std::make_signed<T>::type>::value;
870 if (auto ShiftedImm = getShiftedVal<8>())
871 if (!(IsByte && ShiftedImm->second) &&
872 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
873 << ShiftedImm->second))
874 return DiagnosticPredicateTy::Match;
875
876 return DiagnosticPredicateTy::NearMatch;
877 }
878
879 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
880 if (isLogicalImm<T>() && !isSVECpyImm<T>())
881 return DiagnosticPredicateTy::Match;
882 return DiagnosticPredicateTy::NoMatch;
883 }
884
885 bool isCondCode() const { return Kind == k_CondCode; }
886
887 bool isSIMDImmType10() const {
888 if (!isImm())
889 return false;
890 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
891 if (!MCE)
892 return false;
893 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
894 }
895
896 template<int N>
897 bool isBranchTarget() const {
898 if (!isImm())
899 return false;
900 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
901 if (!MCE)
902 return true;
903 int64_t Val = MCE->getValue();
904 if (Val & 0x3)
905 return false;
906 assert(N > 0 && "Branch target immediate cannot be 0 bits!")((N > 0 && "Branch target immediate cannot be 0 bits!"
) ? static_cast<void> (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 906, __PRETTY_FUNCTION__))
;
907 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
908 }
909
910 bool
911 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
912 if (!isImm())
913 return false;
914
915 AArch64MCExpr::VariantKind ELFRefKind;
916 MCSymbolRefExpr::VariantKind DarwinRefKind;
917 int64_t Addend;
918 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
919 DarwinRefKind, Addend)) {
920 return false;
921 }
922 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
923 return false;
924
925 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
926 if (ELFRefKind == AllowedModifiers[i])
927 return true;
928 }
929
930 return false;
931 }
932
933 bool isMovZSymbolG3() const {
934 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
935 }
936
937 bool isMovZSymbolG2() const {
938 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
939 AArch64MCExpr::VK_TPREL_G2,
940 AArch64MCExpr::VK_DTPREL_G2});
941 }
942
943 bool isMovZSymbolG1() const {
944 return isMovWSymbol({
945 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
946 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
947 AArch64MCExpr::VK_DTPREL_G1,
948 });
949 }
950
951 bool isMovZSymbolG0() const {
952 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
953 AArch64MCExpr::VK_TPREL_G0,
954 AArch64MCExpr::VK_DTPREL_G0});
955 }
956
957 bool isMovKSymbolG3() const {
958 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
959 }
960
961 bool isMovKSymbolG2() const {
962 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
963 }
964
965 bool isMovKSymbolG1() const {
966 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
967 AArch64MCExpr::VK_TPREL_G1_NC,
968 AArch64MCExpr::VK_DTPREL_G1_NC});
969 }
970
971 bool isMovKSymbolG0() const {
972 return isMovWSymbol(
973 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
974 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
975 }
976
977 template<int RegWidth, int Shift>
978 bool isMOVZMovAlias() const {
979 if (!isImm()) return false;
980
981 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
982 if (!CE) return false;
983 uint64_t Value = CE->getValue();
984
985 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
986 }
987
988 template<int RegWidth, int Shift>
989 bool isMOVNMovAlias() const {
990 if (!isImm()) return false;
991
992 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
993 if (!CE) return false;
994 uint64_t Value = CE->getValue();
995
996 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
997 }
998
999 bool isFPImm() const {
1000 return Kind == k_FPImm &&
1001 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1002 }
1003
1004 bool isBarrier() const { return Kind == k_Barrier; }
1005 bool isSysReg() const { return Kind == k_SysReg; }
1006
1007 bool isMRSSystemRegister() const {
1008 if (!isSysReg()) return false;
1009
1010 return SysReg.MRSReg != -1U;
1011 }
1012
1013 bool isMSRSystemRegister() const {
1014 if (!isSysReg()) return false;
1015 return SysReg.MSRReg != -1U;
1016 }
1017
1018 bool isSystemPStateFieldWithImm0_1() const {
1019 if (!isSysReg()) return false;
1020 return (SysReg.PStateField == AArch64PState::PAN ||
1021 SysReg.PStateField == AArch64PState::DIT ||
1022 SysReg.PStateField == AArch64PState::UAO ||
1023 SysReg.PStateField == AArch64PState::SSBS);
1024 }
1025
1026 bool isSystemPStateFieldWithImm0_15() const {
1027 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1028 return SysReg.PStateField != -1U;
1029 }
1030
1031 bool isReg() const override {
1032 return Kind == k_Register;
1033 }
1034
1035 bool isScalarReg() const {
1036 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1037 }
1038
1039 bool isNeonVectorReg() const {
1040 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1041 }
1042
1043 bool isNeonVectorRegLo() const {
1044 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1045 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1046 Reg.RegNum);
1047 }
1048
1049 template <unsigned Class> bool isSVEVectorReg() const {
1050 RegKind RK;
1051 switch (Class) {
1052 case AArch64::ZPRRegClassID:
1053 case AArch64::ZPR_3bRegClassID:
1054 case AArch64::ZPR_4bRegClassID:
1055 RK = RegKind::SVEDataVector;
1056 break;
1057 case AArch64::PPRRegClassID:
1058 case AArch64::PPR_3bRegClassID:
1059 RK = RegKind::SVEPredicateVector;
1060 break;
1061 default:
1062 llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class",
"/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1062)
;
1063 }
1064
1065 return (Kind == k_Register && Reg.Kind == RK) &&
1066 AArch64MCRegisterClasses[Class].contains(getReg());
1067 }
1068
1069 template <unsigned Class> bool isFPRasZPR() const {
1070 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1071 AArch64MCRegisterClasses[Class].contains(getReg());
1072 }
1073
1074 template <int ElementWidth, unsigned Class>
1075 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1076 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1077 return DiagnosticPredicateTy::NoMatch;
1078
1079 if (isSVEVectorReg<Class>() &&
1080 (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1081 return DiagnosticPredicateTy::Match;
1082
1083 return DiagnosticPredicateTy::NearMatch;
1084 }
1085
1086 template <int ElementWidth, unsigned Class>
1087 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1088 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1089 return DiagnosticPredicateTy::NoMatch;
1090
1091 if (isSVEVectorReg<Class>() &&
1092 (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1093 return DiagnosticPredicateTy::Match;
1094
1095 return DiagnosticPredicateTy::NearMatch;
1096 }
1097
1098 template <int ElementWidth, unsigned Class,
1099 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1100 bool ShiftWidthAlwaysSame>
1101 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1102 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1103 if (!VectorMatch.isMatch())
1104 return DiagnosticPredicateTy::NoMatch;
1105
1106 // Give a more specific diagnostic when the user has explicitly typed in
1107 // a shift-amount that does not match what is expected, but for which
1108 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1109 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1110 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1111 ShiftExtendTy == AArch64_AM::SXTW) &&
1112 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1113 return DiagnosticPredicateTy::NoMatch;
1114
1115 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1116 return DiagnosticPredicateTy::Match;
1117
1118 return DiagnosticPredicateTy::NearMatch;
1119 }
1120
1121 bool isGPR32as64() const {
1122 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1123 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1124 }
1125
1126 bool isGPR64as32() const {
1127 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1128 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1129 }
1130
1131 bool isWSeqPair() const {
1132 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1133 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1134 Reg.RegNum);
1135 }
1136
1137 bool isXSeqPair() const {
1138 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1139 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1140 Reg.RegNum);
1141 }
1142
1143 template<int64_t Angle, int64_t Remainder>
1144 DiagnosticPredicate isComplexRotation() const {
1145 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1146
1147 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1148 if (!CE) return DiagnosticPredicateTy::NoMatch;
1149 uint64_t Value = CE->getValue();
1150
1151 if (Value % Angle == Remainder && Value <= 270)
1152 return DiagnosticPredicateTy::Match;
1153 return DiagnosticPredicateTy::NearMatch;
1154 }
1155
1156 template <unsigned RegClassID> bool isGPR64() const {
1157 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1158 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1159 }
1160
1161 template <unsigned RegClassID, int ExtWidth>
1162 DiagnosticPredicate isGPR64WithShiftExtend() const {
1163 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1164 return DiagnosticPredicateTy::NoMatch;
1165
1166 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1167 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1168 return DiagnosticPredicateTy::Match;
1169 return DiagnosticPredicateTy::NearMatch;
1170 }
1171
1172 /// Is this a vector list with the type implicit (presumably attached to the
1173 /// instruction itself)?
1174 template <RegKind VectorKind, unsigned NumRegs>
1175 bool isImplicitlyTypedVectorList() const {
1176 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1177 VectorList.NumElements == 0 &&
1178 VectorList.RegisterKind == VectorKind;
1179 }
1180
1181 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1182 unsigned ElementWidth>
1183 bool isTypedVectorList() const {
1184 if (Kind != k_VectorList)
1185 return false;
1186 if (VectorList.Count != NumRegs)
1187 return false;
1188 if (VectorList.RegisterKind != VectorKind)
1189 return false;
1190 if (VectorList.ElementWidth != ElementWidth)
1191 return false;
1192 return VectorList.NumElements == NumElements;
1193 }
1194
1195 template <int Min, int Max>
1196 DiagnosticPredicate isVectorIndex() const {
1197 if (Kind != k_VectorIndex)
1198 return DiagnosticPredicateTy::NoMatch;
1199 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1200 return DiagnosticPredicateTy::Match;
1201 return DiagnosticPredicateTy::NearMatch;
1202 }
1203
1204 bool isToken() const override { return Kind == k_Token; }
1205
1206 bool isTokenEqual(StringRef Str) const {
1207 return Kind == k_Token && getToken() == Str;
1208 }
1209 bool isSysCR() const { return Kind == k_SysCR; }
1210 bool isPrefetch() const { return Kind == k_Prefetch; }
1211 bool isPSBHint() const { return Kind == k_PSBHint; }
1212 bool isBTIHint() const { return Kind == k_BTIHint; }
1213 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1214 bool isShifter() const {
1215 if (!isShiftExtend())
1216 return false;
1217
1218 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1219 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1220 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1221 ST == AArch64_AM::MSL);
1222 }
1223
1224 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1225 if (Kind != k_FPImm)
1226 return DiagnosticPredicateTy::NoMatch;
1227
1228 if (getFPImmIsExact()) {
1229 // Lookup the immediate from table of supported immediates.
1230 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1231 assert(Desc && "Unknown enum value")((Desc && "Unknown enum value") ? static_cast<void
> (0) : __assert_fail ("Desc && \"Unknown enum value\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1231, __PRETTY_FUNCTION__))
;
1232
1233 // Calculate its FP value.
1234 APFloat RealVal(APFloat::IEEEdouble());
1235 if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
1236 APFloat::opOK)
1237 llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1237)
;
1238
1239 if (getFPImm().bitwiseIsEqual(RealVal))
1240 return DiagnosticPredicateTy::Match;
1241 }
1242
1243 return DiagnosticPredicateTy::NearMatch;
1244 }
1245
1246 template <unsigned ImmA, unsigned ImmB>
1247 DiagnosticPredicate isExactFPImm() const {
1248 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1249 if ((Res = isExactFPImm<ImmA>()))
1250 return DiagnosticPredicateTy::Match;
1251 if ((Res = isExactFPImm<ImmB>()))
1252 return DiagnosticPredicateTy::Match;
1253 return Res;
1254 }
1255
1256 bool isExtend() const {
1257 if (!isShiftExtend())
1258 return false;
1259
1260 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1261 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1262 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1263 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1264 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1265 ET == AArch64_AM::LSL) &&
1266 getShiftExtendAmount() <= 4;
1267 }
1268
1269 bool isExtend64() const {
1270 if (!isExtend())
1271 return false;
1272 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1273 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1274 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1275 }
1276
1277 bool isExtendLSL64() const {
1278 if (!isExtend())
1279 return false;
1280 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1281 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1282 ET == AArch64_AM::LSL) &&
1283 getShiftExtendAmount() <= 4;
1284 }
1285
1286 template<int Width> bool isMemXExtend() const {
1287 if (!isExtend())
1288 return false;
1289 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1290 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1291 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1292 getShiftExtendAmount() == 0);
1293 }
1294
1295 template<int Width> bool isMemWExtend() const {
1296 if (!isExtend())
1297 return false;
1298 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1299 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1300 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1301 getShiftExtendAmount() == 0);
1302 }
1303
1304 template <unsigned width>
1305 bool isArithmeticShifter() const {
1306 if (!isShifter())
1307 return false;
1308
1309 // An arithmetic shifter is LSL, LSR, or ASR.
1310 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1311 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1312 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1313 }
1314
1315 template <unsigned width>
1316 bool isLogicalShifter() const {
1317 if (!isShifter())
1318 return false;
1319
1320 // A logical shifter is LSL, LSR, ASR or ROR.
1321 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1322 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1323 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1324 getShiftExtendAmount() < width;
1325 }
1326
1327 bool isMovImm32Shifter() const {
1328 if (!isShifter())
1329 return false;
1330
1331 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1332 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1333 if (ST != AArch64_AM::LSL)
1334 return false;
1335 uint64_t Val = getShiftExtendAmount();
1336 return (Val == 0 || Val == 16);
1337 }
1338
1339 bool isMovImm64Shifter() const {
1340 if (!isShifter())
1341 return false;
1342
1343 // A MOVi shifter is LSL of 0 or 16.
1344 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1345 if (ST != AArch64_AM::LSL)
1346 return false;
1347 uint64_t Val = getShiftExtendAmount();
1348 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1349 }
1350
1351 bool isLogicalVecShifter() const {
1352 if (!isShifter())
1353 return false;
1354
1355 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1356 unsigned Shift = getShiftExtendAmount();
1357 return getShiftExtendType() == AArch64_AM::LSL &&
1358 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1359 }
1360
1361 bool isLogicalVecHalfWordShifter() const {
1362 if (!isLogicalVecShifter())
1363 return false;
1364
1365 // A logical vector shifter is a left shift by 0 or 8.
1366 unsigned Shift = getShiftExtendAmount();
1367 return getShiftExtendType() == AArch64_AM::LSL &&
1368 (Shift == 0 || Shift == 8);
1369 }
1370
1371 bool isMoveVecShifter() const {
1372 if (!isShiftExtend())
1373 return false;
1374
1375 // A logical vector shifter is a left shift by 8 or 16.
1376 unsigned Shift = getShiftExtendAmount();
1377 return getShiftExtendType() == AArch64_AM::MSL &&
1378 (Shift == 8 || Shift == 16);
1379 }
1380
1381 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1382 // to LDUR/STUR when the offset is not legal for the former but is for
1383 // the latter. As such, in addition to checking for being a legal unscaled
1384 // address, also check that it is not a legal scaled address. This avoids
1385 // ambiguity in the matcher.
1386 template<int Width>
1387 bool isSImm9OffsetFB() const {
1388 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1389 }
1390
1391 bool isAdrpLabel() const {
1392 // Validation was handled during parsing, so we just sanity check that
1393 // something didn't go haywire.
1394 if (!isImm())
1395 return false;
1396
1397 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1398 int64_t Val = CE->getValue();
1399 int64_t Min = - (4096 * (1LL << (21 - 1)));
1400 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1401 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1402 }
1403
1404 return true;
1405 }
1406
1407 bool isAdrLabel() const {
1408 // Validation was handled during parsing, so we just sanity check that
1409 // something didn't go haywire.
1410 if (!isImm())
1411 return false;
1412
1413 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1414 int64_t Val = CE->getValue();
1415 int64_t Min = - (1LL << (21 - 1));
1416 int64_t Max = ((1LL << (21 - 1)) - 1);
1417 return Val >= Min && Val <= Max;
1418 }
1419
1420 return true;
1421 }
1422
1423 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1424 // Add as immediates when possible. Null MCExpr = 0.
1425 if (!Expr)
1426 Inst.addOperand(MCOperand::createImm(0));
1427 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1428 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1429 else
1430 Inst.addOperand(MCOperand::createExpr(Expr));
1431 }
1432
1433 void addRegOperands(MCInst &Inst, unsigned N) const {
1434 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1434, __PRETTY_FUNCTION__))
;
1435 Inst.addOperand(MCOperand::createReg(getReg()));
1436 }
1437
1438 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1439 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1439, __PRETTY_FUNCTION__))
;
1440 assert(((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1441, __PRETTY_FUNCTION__))
1441 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1441, __PRETTY_FUNCTION__))
;
1442
1443 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1444 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1445 RI->getEncodingValue(getReg()));
1446
1447 Inst.addOperand(MCOperand::createReg(Reg));
1448 }
1449
1450 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1451 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1451, __PRETTY_FUNCTION__))
;
1452 assert(((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1453, __PRETTY_FUNCTION__))
1453 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1453, __PRETTY_FUNCTION__))
;
1454
1455 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1456 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1457 RI->getEncodingValue(getReg()));
1458
1459 Inst.addOperand(MCOperand::createReg(Reg));
1460 }
1461
1462 template <int Width>
1463 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1464 unsigned Base;
1465 switch (Width) {
1466 case 8: Base = AArch64::B0; break;
1467 case 16: Base = AArch64::H0; break;
1468 case 32: Base = AArch64::S0; break;
1469 case 64: Base = AArch64::D0; break;
1470 case 128: Base = AArch64::Q0; break;
1471 default:
1472 llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1472)
;
1473 }
1474 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1475 }
1476
1477 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1478 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1478, __PRETTY_FUNCTION__))
;
1479 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1480, __PRETTY_FUNCTION__))
1480 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1480, __PRETTY_FUNCTION__))
;
1481 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1482 }
1483
1484 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1485 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1485, __PRETTY_FUNCTION__))
;
1486 assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1487, __PRETTY_FUNCTION__))
1487 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains
(getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1487, __PRETTY_FUNCTION__))
;
1488 Inst.addOperand(MCOperand::createReg(getReg()));
1489 }
1490
1491 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1492 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1492, __PRETTY_FUNCTION__))
;
1493 Inst.addOperand(MCOperand::createReg(getReg()));
1494 }
1495
// Row selector for the FirstRegs table used by addVectorListOperands:
// distinguishes D-, Q- and (SVE) Z-register vector lists.
enum VecListIndexType {
  VecListIdx_DReg = 0,
  VecListIdx_QReg = 1,
  VecListIdx_ZReg = 2,
};
1501
1502 template <VecListIndexType RegTy, unsigned NumRegs>
1503 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1504 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1504, __PRETTY_FUNCTION__))
;
1505 static const unsigned FirstRegs[][5] = {
1506 /* DReg */ { AArch64::Q0,
1507 AArch64::D0, AArch64::D0_D1,
1508 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1509 /* QReg */ { AArch64::Q0,
1510 AArch64::Q0, AArch64::Q0_Q1,
1511 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1512 /* ZReg */ { AArch64::Z0,
1513 AArch64::Z0, AArch64::Z0_Z1,
1514 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1515 };
1516
1517 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1518, __PRETTY_FUNCTION__))
1518 " NumRegs must be <= 4 for ZRegs")(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs"
) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1518, __PRETTY_FUNCTION__))
;
1519
1520 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1521 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1522 FirstRegs[(unsigned)RegTy][0]));
1523 }
1524
1525 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1526 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1526, __PRETTY_FUNCTION__))
;
1527 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1528 }
1529
1530 template <unsigned ImmIs0, unsigned ImmIs1>
1531 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1532 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1532, __PRETTY_FUNCTION__))
;
1533 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")((bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand"
) ? static_cast<void> (0) : __assert_fail ("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1533, __PRETTY_FUNCTION__))
;
1534 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1535 }
1536
1537 void addImmOperands(MCInst &Inst, unsigned N) const {
1538 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1538, __PRETTY_FUNCTION__))
;
1539 // If this is a pageoff symrefexpr with an addend, adjust the addend
1540 // to be only the page-offset portion. Otherwise, just add the expr
1541 // as-is.
1542 addExpr(Inst, getImm());
1543 }
1544
1545 template <int Shift>
1546 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1547 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1547, __PRETTY_FUNCTION__))
;
1548 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1549 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1550 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1551 } else if (isShiftedImm()) {
1552 addExpr(Inst, getShiftedImmVal());
1553 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1554 } else {
1555 addExpr(Inst, getImm());
1556 Inst.addOperand(MCOperand::createImm(0));
1557 }
1558 }
1559
1560 template <int Shift>
1561 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1562 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1562, __PRETTY_FUNCTION__))
;
1563 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1564 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1565 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1566 } else
1567 llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1567)
;
1568 }
1569
1570 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1571 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1571, __PRETTY_FUNCTION__))
;
1572 Inst.addOperand(MCOperand::createImm(getCondCode()));
1573 }
1574
1575 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1576 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1576, __PRETTY_FUNCTION__))
;
1577 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1578 if (!MCE)
1579 addExpr(Inst, getImm());
1580 else
1581 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1582 }
1583
1584 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1585 addImmOperands(Inst, N);
1586 }
1587
1588 template<int Scale>
1589 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1590 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1590, __PRETTY_FUNCTION__))
;
1591 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1592
1593 if (!MCE) {
1594 Inst.addOperand(MCOperand::createExpr(getImm()));
1595 return;
1596 }
1597 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1598 }
1599
1600 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1601 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1601, __PRETTY_FUNCTION__))
;
1602 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1603 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1604 }
1605
1606 template <int Scale>
1607 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1608 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1608, __PRETTY_FUNCTION__))
;
1609 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1610 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1611 }
1612
1613 template <typename T>
1614 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1615 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1615, __PRETTY_FUNCTION__))
;
1616 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1617 typename std::make_unsigned<T>::type Val = MCE->getValue();
1618 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1619 Inst.addOperand(MCOperand::createImm(encoding));
1620 }
1621
1622 template <typename T>
1623 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1624 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1624, __PRETTY_FUNCTION__))
;
1625 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1626 typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1627 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1628 Inst.addOperand(MCOperand::createImm(encoding));
1629 }
1630
1631 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1632 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1632, __PRETTY_FUNCTION__))
;
1633 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1634 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1635 Inst.addOperand(MCOperand::createImm(encoding));
1636 }
1637
1638 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1639 // Branch operands don't encode the low bits, so shift them off
1640 // here. If it's a label, however, just put it on directly as there's
1641 // not enough information now to do anything.
1642 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1642, __PRETTY_FUNCTION__))
;
1643 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1644 if (!MCE) {
1645 addExpr(Inst, getImm());
1646 return;
1647 }
1648 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1648, __PRETTY_FUNCTION__))
;
1649 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1650 }
1651
1652 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1653 // Branch operands don't encode the low bits, so shift them off
1654 // here. If it's a label, however, just put it on directly as there's
1655 // not enough information now to do anything.
1656 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1656, __PRETTY_FUNCTION__))
;
1657 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1658 if (!MCE) {
1659 addExpr(Inst, getImm());
1660 return;
1661 }
1662 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1662, __PRETTY_FUNCTION__))
;
1663 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1664 }
1665
1666 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1667 // Branch operands don't encode the low bits, so shift them off
1668 // here. If it's a label, however, just put it on directly as there's
1669 // not enough information now to do anything.
1670 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1670, __PRETTY_FUNCTION__))
;
1671 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1672 if (!MCE) {
1673 addExpr(Inst, getImm());
1674 return;
1675 }
1676 assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast
<void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1676, __PRETTY_FUNCTION__))
;
1677 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1678 }
1679
1680 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1681 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1681, __PRETTY_FUNCTION__))
;
1682 Inst.addOperand(MCOperand::createImm(
1683 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1684 }
1685
1686 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1687 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1687, __PRETTY_FUNCTION__))
;
1688 Inst.addOperand(MCOperand::createImm(getBarrier()));
1689 }
1690
1691 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1692 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1692, __PRETTY_FUNCTION__))
;
1693
1694 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1695 }
1696
1697 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1698 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1698, __PRETTY_FUNCTION__))
;
1699
1700 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1701 }
1702
1703 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1704 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1704, __PRETTY_FUNCTION__))
;
1705
1706 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1707 }
1708
1709 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1710 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1710, __PRETTY_FUNCTION__))
;
1711
1712 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1713 }
1714
1715 void addSysCROperands(MCInst &Inst, unsigned N) const {
1716 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1716, __PRETTY_FUNCTION__))
;
1717 Inst.addOperand(MCOperand::createImm(getSysCR()));
1718 }
1719
1720 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1721 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1721, __PRETTY_FUNCTION__))
;
1722 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1723 }
1724
1725 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1726 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1726, __PRETTY_FUNCTION__))
;
1727 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1728 }
1729
1730 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1731 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1731, __PRETTY_FUNCTION__))
;
1732 Inst.addOperand(MCOperand::createImm(getBTIHint()));
1733 }
1734
1735 void addShifterOperands(MCInst &Inst, unsigned N) const {
1736 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1736, __PRETTY_FUNCTION__))
;
1737 unsigned Imm =
1738 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1739 Inst.addOperand(MCOperand::createImm(Imm));
1740 }
1741
1742 void addExtendOperands(MCInst &Inst, unsigned N) const {
1743 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1743, __PRETTY_FUNCTION__))
;
1744 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1745 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1746 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1747 Inst.addOperand(MCOperand::createImm(Imm));
1748 }
1749
1750 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1751 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1751, __PRETTY_FUNCTION__))
;
1752 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1753 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1754 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1755 Inst.addOperand(MCOperand::createImm(Imm));
1756 }
1757
1758 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1759 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1759, __PRETTY_FUNCTION__))
;
1760 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1761 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1762 Inst.addOperand(MCOperand::createImm(IsSigned));
1763 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1764 }
1765
1766 // For 8-bit load/store instructions with a register offset, both the
1767 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1768 // they're disambiguated by whether the shift was explicit or implicit rather
1769 // than its size.
1770 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1771 assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1771, __PRETTY_FUNCTION__))
;
1772 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1773 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1774 Inst.addOperand(MCOperand::createImm(IsSigned));
1775 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1776 }
1777
1778 template<int Shift>
1779 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1780 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1780, __PRETTY_FUNCTION__))
;
1781
1782 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1783 uint64_t Value = CE->getValue();
1784 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1785 }
1786
1787 template<int Shift>
1788 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1789 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1789, __PRETTY_FUNCTION__))
;
1790
1791 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1792 uint64_t Value = CE->getValue();
1793 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1794 }
1795
1796 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1797 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1797, __PRETTY_FUNCTION__))
;
1798 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1799 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1800 }
1801
1802 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1803 assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast
<void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1803, __PRETTY_FUNCTION__))
;
1804 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1805 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1806 }
1807
1808 void print(raw_ostream &OS) const override;
1809
1810 static std::unique_ptr<AArch64Operand>
1811 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1812 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1813 Op->Tok.Data = Str.data();
1814 Op->Tok.Length = Str.size();
1815 Op->Tok.IsSuffix = IsSuffix;
1816 Op->StartLoc = S;
1817 Op->EndLoc = S;
1818 return Op;
1819 }
1820
1821 static std::unique_ptr<AArch64Operand>
1822 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1823 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1824 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1825 unsigned ShiftAmount = 0,
1826 unsigned HasExplicitAmount = false) {
1827 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1828 Op->Reg.RegNum = RegNum;
1829 Op->Reg.Kind = Kind;
1830 Op->Reg.ElementWidth = 0;
1831 Op->Reg.EqualityTy = EqTy;
1832 Op->Reg.ShiftExtend.Type = ExtTy;
1833 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1834 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1835 Op->StartLoc = S;
1836 Op->EndLoc = E;
1837 return Op;
1838 }
1839
1840 static std::unique_ptr<AArch64Operand>
1841 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1842 SMLoc S, SMLoc E, MCContext &Ctx,
1843 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1844 unsigned ShiftAmount = 0,
1845 unsigned HasExplicitAmount = false) {
1846 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1848, __PRETTY_FUNCTION__))
1847 Kind == RegKind::SVEPredicateVector) &&(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1848, __PRETTY_FUNCTION__))
1848 "Invalid vector kind")(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector
|| Kind == RegKind::SVEPredicateVector) && "Invalid vector kind"
) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 1848, __PRETTY_FUNCTION__))
;
1849 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1850 HasExplicitAmount);
1851 Op->Reg.ElementWidth = ElementWidth;
1852 return Op;
1853 }
1854
1855 static std::unique_ptr<AArch64Operand>
1856 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1857 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1858 MCContext &Ctx) {
1859 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1860 Op->VectorList.RegNum = RegNum;
1861 Op->VectorList.Count = Count;
1862 Op->VectorList.NumElements = NumElements;
1863 Op->VectorList.ElementWidth = ElementWidth;
1864 Op->VectorList.RegisterKind = RegisterKind;
1865 Op->StartLoc = S;
1866 Op->EndLoc = E;
1867 return Op;
1868 }
1869
1870 static std::unique_ptr<AArch64Operand>
1871 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1872 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1873 Op->VectorIndex.Val = Idx;
1874 Op->StartLoc = S;
1875 Op->EndLoc = E;
1876 return Op;
1877 }
1878
1879 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1880 SMLoc E, MCContext &Ctx) {
1881 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1882 Op->Imm.Val = Val;
1883 Op->StartLoc = S;
1884 Op->EndLoc = E;
1885 return Op;
1886 }
1887
1888 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1889 unsigned ShiftAmount,
1890 SMLoc S, SMLoc E,
1891 MCContext &Ctx) {
1892 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1893 Op->ShiftedImm .Val = Val;
1894 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1895 Op->StartLoc = S;
1896 Op->EndLoc = E;
1897 return Op;
1898 }
1899
1900 static std::unique_ptr<AArch64Operand>
1901 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1902 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1903 Op->CondCode.Code = Code;
1904 Op->StartLoc = S;
1905 Op->EndLoc = E;
1906 return Op;
1907 }
1908
1909 static std::unique_ptr<AArch64Operand>
1910 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1911 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1912 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1913 Op->FPImm.IsExact = IsExact;
1914 Op->StartLoc = S;
1915 Op->EndLoc = S;
1916 return Op;
1917 }
1918
1919 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1920 StringRef Str,
1921 SMLoc S,
1922 MCContext &Ctx) {
1923 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1924 Op->Barrier.Val = Val;
1925 Op->Barrier.Data = Str.data();
1926 Op->Barrier.Length = Str.size();
1927 Op->StartLoc = S;
1928 Op->EndLoc = S;
1929 return Op;
1930 }
1931
1932 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1933 uint32_t MRSReg,
1934 uint32_t MSRReg,
1935 uint32_t PStateField,
1936 MCContext &Ctx) {
1937 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1938 Op->SysReg.Data = Str.data();
1939 Op->SysReg.Length = Str.size();
1940 Op->SysReg.MRSReg = MRSReg;
1941 Op->SysReg.MSRReg = MSRReg;
1942 Op->SysReg.PStateField = PStateField;
1943 Op->StartLoc = S;
1944 Op->EndLoc = S;
1945 return Op;
1946 }
1947
1948 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1949 SMLoc E, MCContext &Ctx) {
1950 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1951 Op->SysCRImm.Val = Val;
1952 Op->StartLoc = S;
1953 Op->EndLoc = E;
1954 return Op;
1955 }
1956
1957 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1958 StringRef Str,
1959 SMLoc S,
1960 MCContext &Ctx) {
1961 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1962 Op->Prefetch.Val = Val;
1963 Op->Barrier.Data = Str.data();
1964 Op->Barrier.Length = Str.size();
1965 Op->StartLoc = S;
1966 Op->EndLoc = S;
1967 return Op;
1968 }
1969
1970 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1971 StringRef Str,
1972 SMLoc S,
1973 MCContext &Ctx) {
1974 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1975 Op->PSBHint.Val = Val;
1976 Op->PSBHint.Data = Str.data();
1977 Op->PSBHint.Length = Str.size();
1978 Op->StartLoc = S;
1979 Op->EndLoc = S;
1980 return Op;
1981 }
1982
1983 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1984 StringRef Str,
1985 SMLoc S,
1986 MCContext &Ctx) {
1987 auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
1988 Op->BTIHint.Val = Val << 1 | 32;
1989 Op->BTIHint.Data = Str.data();
1990 Op->BTIHint.Length = Str.size();
1991 Op->StartLoc = S;
1992 Op->EndLoc = S;
1993 return Op;
1994 }
1995
1996 static std::unique_ptr<AArch64Operand>
1997 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1998 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1999 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2000 Op->ShiftExtend.Type = ShOp;
2001 Op->ShiftExtend.Amount = Val;
2002 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2003 Op->StartLoc = S;
2004 Op->EndLoc = E;
2005 return Op;
2006 }
2007};
2008
2009} // end anonymous namespace.
2010
2011void AArch64Operand::print(raw_ostream &OS) const {
2012 switch (Kind) {
2013 case k_FPImm:
2014 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2015 if (!getFPImmIsExact())
2016 OS << " (inexact)";
2017 OS << ">";
2018 break;
2019 case k_Barrier: {
2020 StringRef Name = getBarrierName();
2021 if (!Name.empty())
2022 OS << "<barrier " << Name << ">";
2023 else
2024 OS << "<barrier invalid #" << getBarrier() << ">";
2025 break;
2026 }
2027 case k_Immediate:
2028 OS << *getImm();
2029 break;
2030 case k_ShiftedImm: {
2031 unsigned Shift = getShiftedImmShift();
2032 OS << "<shiftedimm ";
2033 OS << *getShiftedImmVal();
2034 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2035 break;
2036 }
2037 case k_CondCode:
2038 OS << "<condcode " << getCondCode() << ">";
2039 break;
2040 case k_VectorList: {
2041 OS << "<vectorlist ";
2042 unsigned Reg = getVectorListStart();
2043 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2044 OS << Reg + i << " ";
2045 OS << ">";
2046 break;
2047 }
2048 case k_VectorIndex:
2049 OS << "<vectorindex " << getVectorIndex() << ">";
2050 break;
2051 case k_SysReg:
2052 OS << "<sysreg: " << getSysReg() << '>';
2053 break;
2054 case k_Token:
2055 OS << "'" << getToken() << "'";
2056 break;
2057 case k_SysCR:
2058 OS << "c" << getSysCR();
2059 break;
2060 case k_Prefetch: {
2061 StringRef Name = getPrefetchName();
2062 if (!Name.empty())
2063 OS << "<prfop " << Name << ">";
2064 else
2065 OS << "<prfop invalid #" << getPrefetch() << ">";
2066 break;
2067 }
2068 case k_PSBHint:
2069 OS << getPSBHintName();
2070 break;
2071 case k_Register:
2072 OS << "<register " << getReg() << ">";
2073 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2074 break;
2075 LLVM_FALLTHROUGH[[clang::fallthrough]];
2076 case k_BTIHint:
2077 OS << getBTIHintName();
2078 break;
2079 case k_ShiftExtend:
2080 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2081 << getShiftExtendAmount();
2082 if (!hasShiftExtendAmount())
2083 OS << "<imp>";
2084 OS << '>';
2085 break;
2086 }
2087}
2088
2089/// @name Auto-generated Match Functions
2090/// {
2091
2092static unsigned MatchRegisterName(StringRef Name);
2093
2094/// }
2095
2096static unsigned MatchNeonVectorRegName(StringRef Name) {
2097 return StringSwitch<unsigned>(Name.lower())
2098 .Case("v0", AArch64::Q0)
2099 .Case("v1", AArch64::Q1)
2100 .Case("v2", AArch64::Q2)
2101 .Case("v3", AArch64::Q3)
2102 .Case("v4", AArch64::Q4)
2103 .Case("v5", AArch64::Q5)
2104 .Case("v6", AArch64::Q6)
2105 .Case("v7", AArch64::Q7)
2106 .Case("v8", AArch64::Q8)
2107 .Case("v9", AArch64::Q9)
2108 .Case("v10", AArch64::Q10)
2109 .Case("v11", AArch64::Q11)
2110 .Case("v12", AArch64::Q12)
2111 .Case("v13", AArch64::Q13)
2112 .Case("v14", AArch64::Q14)
2113 .Case("v15", AArch64::Q15)
2114 .Case("v16", AArch64::Q16)
2115 .Case("v17", AArch64::Q17)
2116 .Case("v18", AArch64::Q18)
2117 .Case("v19", AArch64::Q19)
2118 .Case("v20", AArch64::Q20)
2119 .Case("v21", AArch64::Q21)
2120 .Case("v22", AArch64::Q22)
2121 .Case("v23", AArch64::Q23)
2122 .Case("v24", AArch64::Q24)
2123 .Case("v25", AArch64::Q25)
2124 .Case("v26", AArch64::Q26)
2125 .Case("v27", AArch64::Q27)
2126 .Case("v28", AArch64::Q28)
2127 .Case("v29", AArch64::Q29)
2128 .Case("v30", AArch64::Q30)
2129 .Case("v31", AArch64::Q31)
2130 .Default(0);
2131}
2132
2133/// Returns an optional pair of (#elements, element-width) if Suffix
2134/// is a valid vector kind. Where the number of elements in a vector
2135/// or the vector width is implicit or explicitly unknown (but still a
2136/// valid suffix kind), 0 is used.
2137static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2138 RegKind VectorKind) {
2139 std::pair<int, int> Res = {-1, -1};
2140
2141 switch (VectorKind) {
2142 case RegKind::NeonVector:
2143 Res =
2144 StringSwitch<std::pair<int, int>>(Suffix.lower())
2145 .Case("", {0, 0})
2146 .Case(".1d", {1, 64})
2147 .Case(".1q", {1, 128})
2148 // '.2h' needed for fp16 scalar pairwise reductions
2149 .Case(".2h", {2, 16})
2150 .Case(".2s", {2, 32})
2151 .Case(".2d", {2, 64})
2152 // '.4b' is another special case for the ARMv8.2a dot product
2153 // operand
2154 .Case(".4b", {4, 8})
2155 .Case(".4h", {4, 16})
2156 .Case(".4s", {4, 32})
2157 .Case(".8b", {8, 8})
2158 .Case(".8h", {8, 16})
2159 .Case(".16b", {16, 8})
2160 // Accept the width neutral ones, too, for verbose syntax. If those
2161 // aren't used in the right places, the token operand won't match so
2162 // all will work out.
2163 .Case(".b", {0, 8})
2164 .Case(".h", {0, 16})
2165 .Case(".s", {0, 32})
2166 .Case(".d", {0, 64})
2167 .Default({-1, -1});
2168 break;
2169 case RegKind::SVEPredicateVector:
2170 case RegKind::SVEDataVector:
2171 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2172 .Case("", {0, 0})
2173 .Case(".b", {0, 8})
2174 .Case(".h", {0, 16})
2175 .Case(".s", {0, 32})
2176 .Case(".d", {0, 64})
2177 .Case(".q", {0, 128})
2178 .Default({-1, -1});
2179 break;
2180 default:
2181 llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2181)
;
2182 }
2183
2184 if (Res == std::make_pair(-1, -1))
2185 return Optional<std::pair<int, int>>();
2186
2187 return Optional<std::pair<int, int>>(Res);
2188}
2189
2190static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2191 return parseVectorKind(Suffix, VectorKind).hasValue();
2192}
2193
2194static unsigned matchSVEDataVectorRegName(StringRef Name) {
2195 return StringSwitch<unsigned>(Name.lower())
2196 .Case("z0", AArch64::Z0)
2197 .Case("z1", AArch64::Z1)
2198 .Case("z2", AArch64::Z2)
2199 .Case("z3", AArch64::Z3)
2200 .Case("z4", AArch64::Z4)
2201 .Case("z5", AArch64::Z5)
2202 .Case("z6", AArch64::Z6)
2203 .Case("z7", AArch64::Z7)
2204 .Case("z8", AArch64::Z8)
2205 .Case("z9", AArch64::Z9)
2206 .Case("z10", AArch64::Z10)
2207 .Case("z11", AArch64::Z11)
2208 .Case("z12", AArch64::Z12)
2209 .Case("z13", AArch64::Z13)
2210 .Case("z14", AArch64::Z14)
2211 .Case("z15", AArch64::Z15)
2212 .Case("z16", AArch64::Z16)
2213 .Case("z17", AArch64::Z17)
2214 .Case("z18", AArch64::Z18)
2215 .Case("z19", AArch64::Z19)
2216 .Case("z20", AArch64::Z20)
2217 .Case("z21", AArch64::Z21)
2218 .Case("z22", AArch64::Z22)
2219 .Case("z23", AArch64::Z23)
2220 .Case("z24", AArch64::Z24)
2221 .Case("z25", AArch64::Z25)
2222 .Case("z26", AArch64::Z26)
2223 .Case("z27", AArch64::Z27)
2224 .Case("z28", AArch64::Z28)
2225 .Case("z29", AArch64::Z29)
2226 .Case("z30", AArch64::Z30)
2227 .Case("z31", AArch64::Z31)
2228 .Default(0);
2229}
2230
2231static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2232 return StringSwitch<unsigned>(Name.lower())
2233 .Case("p0", AArch64::P0)
2234 .Case("p1", AArch64::P1)
2235 .Case("p2", AArch64::P2)
2236 .Case("p3", AArch64::P3)
2237 .Case("p4", AArch64::P4)
2238 .Case("p5", AArch64::P5)
2239 .Case("p6", AArch64::P6)
2240 .Case("p7", AArch64::P7)
2241 .Case("p8", AArch64::P8)
2242 .Case("p9", AArch64::P9)
2243 .Case("p10", AArch64::P10)
2244 .Case("p11", AArch64::P11)
2245 .Case("p12", AArch64::P12)
2246 .Case("p13", AArch64::P13)
2247 .Case("p14", AArch64::P14)
2248 .Case("p15", AArch64::P15)
2249 .Default(0);
2250}
2251
2252bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2253 SMLoc &EndLoc) {
2254 StartLoc = getLoc();
2255 auto Res = tryParseScalarRegister(RegNo);
2256 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2257 return Res != MatchOperand_Success;
2258}
2259
2260// Matches a register name or register alias previously defined by '.req'
2261unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2262 RegKind Kind) {
2263 unsigned RegNum = 0;
2264 if ((RegNum = matchSVEDataVectorRegName(Name)))
2265 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2266
2267 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2268 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2269
2270 if ((RegNum = MatchNeonVectorRegName(Name)))
2271 return Kind == RegKind::NeonVector ? RegNum : 0;
2272
2273 // The parsed register must be of RegKind Scalar
2274 if ((RegNum = MatchRegisterName(Name)))
2275 return Kind == RegKind::Scalar ? RegNum : 0;
2276
2277 if (!RegNum) {
2278 // Handle a few common aliases of registers.
2279 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2280 .Case("fp", AArch64::FP)
2281 .Case("lr", AArch64::LR)
2282 .Case("x31", AArch64::XZR)
2283 .Case("w31", AArch64::WZR)
2284 .Default(0))
2285 return Kind == RegKind::Scalar ? RegNum : 0;
2286
2287 // Check for aliases registered via .req. Canonicalize to lower case.
2288 // That's more consistent since register names are case insensitive, and
2289 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2290 auto Entry = RegisterReqs.find(Name.lower());
2291 if (Entry == RegisterReqs.end())
2292 return 0;
2293
2294 // set RegNum if the match is the right kind of register
2295 if (Kind == Entry->getValue().first)
2296 RegNum = Entry->getValue().second;
2297 }
2298 return RegNum;
2299}
2300
2301/// tryParseScalarRegister - Try to parse a register name. The token must be an
2302/// Identifier when called, and if it is a register name the token is eaten and
2303/// the register is added to the operand list.
2304OperandMatchResultTy
2305AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2306 MCAsmParser &Parser = getParser();
2307 const AsmToken &Tok = Parser.getTok();
2308 if (Tok.isNot(AsmToken::Identifier))
2309 return MatchOperand_NoMatch;
2310
2311 std::string lowerCase = Tok.getString().lower();
2312 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2313 if (Reg == 0)
2314 return MatchOperand_NoMatch;
2315
2316 RegNum = Reg;
2317 Parser.Lex(); // Eat identifier token.
2318 return MatchOperand_Success;
2319}
2320
2321/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2322OperandMatchResultTy
2323AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2324 MCAsmParser &Parser = getParser();
2325 SMLoc S = getLoc();
2326
2327 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2328 Error(S, "Expected cN operand where 0 <= N <= 15");
2329 return MatchOperand_ParseFail;
2330 }
2331
2332 StringRef Tok = Parser.getTok().getIdentifier();
2333 if (Tok[0] != 'c' && Tok[0] != 'C') {
2334 Error(S, "Expected cN operand where 0 <= N <= 15");
2335 return MatchOperand_ParseFail;
2336 }
2337
2338 uint32_t CRNum;
2339 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2340 if (BadNum || CRNum > 15) {
2341 Error(S, "Expected cN operand where 0 <= N <= 15");
2342 return MatchOperand_ParseFail;
2343 }
2344
2345 Parser.Lex(); // Eat identifier token.
2346 Operands.push_back(
2347 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2348 return MatchOperand_Success;
2349}
2350
/// tryParsePrefetch - Try to parse a prefetch operand.
///
/// Accepts either a named prefetch hint (looked up in the PRFM or SVE-PRFM
/// table, selected by the IsSVEPrefetch template parameter) or a raw
/// immediate, optionally preceded by '#'.
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Map a hint name to its encoding via the table for this instantiation;
  // empty Optional if the name is unknown.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Reverse mapping: attach a canonical name to a numeric operand when the
  // encoding happens to be a named one.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  // SVE prefetch immediates are 4-bit; base prefetch immediates are 5-bit.
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only plain constants are accepted here; symbolic expressions are not.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Use the canonical name for this encoding when one exists, so the
    // operand prints symbolically.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  // NOTE(review): Tok aliases the lexer's current token, so getString() below
  // is evaluated after Lex(); presumably the stored name is only used for
  // diagnostics/printing — confirm against AsmLexer token lifetime.
  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  return MatchOperand_Success;
}
2420
/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command.
/// A PSB hint is spelled as a bare identifier looked up in the PSB table;
/// there is no numeric form.  Returns ParseFail with a diagnostic on any
/// non-matching token.
OperandMatchResultTy
AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
  if (!PSB) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  // NOTE(review): Tok aliases the lexer's current token, so getString() below
  // is evaluated after Lex(); presumably the stored name is only used for
  // printing — confirm against AsmLexer token lifetime.
  Operands.push_back(AArch64Operand::CreatePSBHint(
      PSB->Encoding, Tok.getString(), S, getContext()));
  return MatchOperand_Success;
}
2443
/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command.
/// A BTI target is spelled as a bare identifier looked up in the BTI table;
/// mirrors tryParsePSBHint above.
OperandMatchResultTy
AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
  if (!BTI) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  // NOTE(review): Tok aliases the lexer's current token, so getString() below
  // is evaluated after Lex(); presumably the stored name is only used for
  // printing — confirm against AsmLexer token lifetime.
  Operands.push_back(AArch64Operand::CreateBTIHint(
      BTI->Encoding, Tok.getString(), S, getContext()));
  return MatchOperand_Success;
}
2466
2467/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2468/// instruction.
2469OperandMatchResultTy
2470AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2471 MCAsmParser &Parser = getParser();
2472 SMLoc S = getLoc();
2473 const MCExpr *Expr;
2474
2475 if (Parser.getTok().is(AsmToken::Hash)) {
2476 Parser.Lex(); // Eat hash token.
2477 }
2478
2479 if (parseSymbolicImmVal(Expr))
2480 return MatchOperand_ParseFail;
2481
2482 AArch64MCExpr::VariantKind ELFRefKind;
2483 MCSymbolRefExpr::VariantKind DarwinRefKind;
2484 int64_t Addend;
2485 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2486 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2487 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2488 // No modifier was specified at all; this is the syntax for an ELF basic
2489 // ADRP relocation (unfortunately).
2490 Expr =
2491 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2492 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2493 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2494 Addend != 0) {
2495 Error(S, "gotpage label reference not allowed an addend");
2496 return MatchOperand_ParseFail;
2497 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2498 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2499 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2500 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2501 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2502 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2503 // The operand must be an @page or @gotpage qualified symbolref.
2504 Error(S, "page or gotpage label reference expected");
2505 return MatchOperand_ParseFail;
2506 }
2507 }
2508
2509 // We have either a label reference possibly with addend or an immediate. The
2510 // addend is a raw value here. The linker will adjust it to only reference the
2511 // page.
2512 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2513 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2514
2515 return MatchOperand_Success;
2516}
2517
2518/// tryParseAdrLabel - Parse and validate a source label for the ADR
2519/// instruction.
2520OperandMatchResultTy
2521AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2522 SMLoc S = getLoc();
2523 const MCExpr *Expr;
1
'Expr' declared without an initial value
2524
2525 // Leave anything with a bracket to the default for SVE
2526 if (getParser().getTok().is(AsmToken::LBrac))
2
Taking false branch
2527 return MatchOperand_NoMatch;
2528
2529 if (getParser().getTok().is(AsmToken::Hash))
3
Taking false branch
2530 getParser().Lex(); // Eat hash token.
2531
2532 if (parseSymbolicImmVal(Expr))
4
Calling 'AArch64AsmParser::parseSymbolicImmVal'
9
Returning from 'AArch64AsmParser::parseSymbolicImmVal'
10
Assuming the condition is false
11
Taking false branch
2533 return MatchOperand_ParseFail;
2534
2535 AArch64MCExpr::VariantKind ELFRefKind;
2536 MCSymbolRefExpr::VariantKind DarwinRefKind;
2537 int64_t Addend;
2538 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
12
1st function call argument is an uninitialized value
2539 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2540 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2541 // No modifier was specified at all; this is the syntax for an ELF basic
2542 // ADR relocation (unfortunately).
2543 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2544 } else {
2545 Error(S, "unexpected adr label");
2546 return MatchOperand_ParseFail;
2547 }
2548 }
2549
2550 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2551 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2552 return MatchOperand_Success;
2553}
2554
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts an optional '#', an optional leading '-', and then either a
/// hexadecimal FP8-style encoding (0x00..0xFF) or a decimal/real literal.
/// When AddFPZeroAsLiteral is set, +0.0 is pushed as the two tokens
/// "#0" ".0" instead of an FPImm operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this simply isn't an FP immediate; with one, it
    // must be, so the malformed token is a hard error.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The hex form is the raw 8-bit encoded value; a sign token makes no
    // sense here.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    // Status records whether the literal converted exactly; an inexact
    // conversion is flagged on the operand rather than rejected here.
    auto Status =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, Status == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2607
2608/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2609/// a shift suffix, for example '#1, lsl #12'.
2610OperandMatchResultTy
2611AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2612 MCAsmParser &Parser = getParser();
2613 SMLoc S = getLoc();
2614
2615 if (Parser.getTok().is(AsmToken::Hash))
2616 Parser.Lex(); // Eat '#'
2617 else if (Parser.getTok().isNot(AsmToken::Integer))
2618 // Operand should start from # or should be integer, emit error otherwise.
2619 return MatchOperand_NoMatch;
2620
2621 const MCExpr *Imm;
2622 if (parseSymbolicImmVal(Imm))
2623 return MatchOperand_ParseFail;
2624 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2625 SMLoc E = Parser.getTok().getLoc();
2626 Operands.push_back(
2627 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2628 return MatchOperand_Success;
2629 }
2630
2631 // Eat ','
2632 Parser.Lex();
2633
2634 // The optional operand must be "lsl #N" where N is non-negative.
2635 if (!Parser.getTok().is(AsmToken::Identifier) ||
2636 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2637 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2638 return MatchOperand_ParseFail;
2639 }
2640
2641 // Eat 'lsl'
2642 Parser.Lex();
2643
2644 parseOptionalToken(AsmToken::Hash);
2645
2646 if (Parser.getTok().isNot(AsmToken::Integer)) {
2647 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2648 return MatchOperand_ParseFail;
2649 }
2650
2651 int64_t ShiftAmount = Parser.getTok().getIntVal();
2652
2653 if (ShiftAmount < 0) {
2654 Error(Parser.getTok().getLoc(), "positive shift amount required");
2655 return MatchOperand_ParseFail;
2656 }
2657 Parser.Lex(); // Eat the number
2658
2659 // Just in case the optional lsl #0 is used for immediates other than zero.
2660 if (ShiftAmount == 0 && Imm != 0) {
2661 SMLoc E = Parser.getTok().getLoc();
2662 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2663 return MatchOperand_Success;
2664 }
2665
2666 SMLoc E = Parser.getTok().getLoc();
2667 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2668 S, E, getContext()));
2669 return MatchOperand_Success;
2670}
2671
/// parseCondCodeString - Parse a Condition Code string (case-insensitive).
/// Returns AArch64CC::Invalid if Cond is not a recognized condition name.
/// With SVE enabled, the SVE predicate-condition aliases (none/any/...) are
/// also accepted and map onto the base condition codes.
AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                     .Case("eq", AArch64CC::EQ)
                     .Case("ne", AArch64CC::NE)
                     .Case("cs", AArch64CC::HS)
                     .Case("hs", AArch64CC::HS)
                     .Case("cc", AArch64CC::LO)
                     .Case("lo", AArch64CC::LO)
                     .Case("mi", AArch64CC::MI)
                     .Case("pl", AArch64CC::PL)
                     .Case("vs", AArch64CC::VS)
                     .Case("vc", AArch64CC::VC)
                     .Case("hi", AArch64CC::HI)
                     .Case("ls", AArch64CC::LS)
                     .Case("ge", AArch64CC::GE)
                     .Case("lt", AArch64CC::LT)
                     .Case("gt", AArch64CC::GT)
                     .Case("le", AArch64CC::LE)
                     .Case("al", AArch64CC::AL)
                     .Case("nv", AArch64CC::NV)
                     .Default(AArch64CC::Invalid);

  // Fall back to the SVE alias spellings only when the base table missed and
  // the subtarget actually has SVE.
  if (CC == AArch64CC::Invalid &&
      getSTI().getFeatureBits()[AArch64::FeatureSVE])
    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("none",  AArch64CC::EQ)
                    .Case("any",   AArch64CC::NE)
                    .Case("nlast", AArch64CC::HS)
                    .Case("last",  AArch64CC::LO)
                    .Case("first", AArch64CC::MI)
                    .Case("nfrst", AArch64CC::PL)
                    .Case("pmore", AArch64CC::HI)
                    .Case("plast", AArch64CC::LS)
                    .Case("tcont", AArch64CC::GE)
                    .Case("tstop", AArch64CC::LT)
                    .Default(AArch64CC::Invalid);

  return CC;
}
2712
/// parseCondCode - Parse a Condition Code operand.
/// The current token must already be an Identifier.  Returns true (with a
/// diagnostic) on error, false on success; on success the condition-code
/// operand is appended to Operands.  When invertCondCode is set the parsed
/// code is inverted (AL/NV have no inverse and are rejected).
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  StringRef Cond = Tok.getString();
  AArch64CC::CondCode CC = parseCondCodeString(Cond);
  if (CC == AArch64CC::Invalid)
    return TokError("invalid condition code");
  Parser.Lex(); // Eat identifier token.

  if (invertCondCode) {
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
      return TokError("condition codes AL and NV are invalid for this instruction");
    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
  }

  Operands.push_back(
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
  return false;
}
2737
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument.  Parse it if present: a shift/extend mnemonic optionally
/// followed by '#' and an amount.  Shift mnemonics (lsl/lsr/asr/ror/msl)
/// require an amount; extend mnemonics may omit it (implicit #0).
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // No amount present.  Only the true shifts insist on one.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a plain constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2811
// Maps architecture-extension names (as written in directives such as
// ".arch_extension") to the subtarget features they enable.  An empty
// feature set marks extensions the assembler recognizes by name but does
// not (yet) model as distinct features.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    { "crc", {AArch64::FeatureCRC} },
    { "sm4", {AArch64::FeatureSM4} },
    { "sha3", {AArch64::FeatureSHA3} },
    { "sha2", {AArch64::FeatureSHA2} },
    { "aes", {AArch64::FeatureAES} },
    { "crypto", {AArch64::FeatureCrypto} },
    { "fp", {AArch64::FeatureFPARMv8} },
    { "simd", {AArch64::FeatureNEON} },
    { "ras", {AArch64::FeatureRAS} },
    { "lse", {AArch64::FeatureLSE} },
    { "predctrl", {AArch64::FeaturePredCtrl} },
    { "ccdp", {AArch64::FeatureCacheDeepPersist} },
    { "mte", {AArch64::FeatureMTE} },

    // FIXME: Unsupported extensions
    { "pan", {} },
    { "lor", {} },
    { "rdma", {} },
    { "profile", {} },
};
2836
2837
2838static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2839 if (FBS[AArch64::HasV8_1aOps])
2840 Str += "ARMv8.1a";
2841 else if (FBS[AArch64::HasV8_2aOps])
2842 Str += "ARMv8.2a";
2843 else if (FBS[AArch64::HasV8_3aOps])
2844 Str += "ARMv8.3a";
2845 else if (FBS[AArch64::HasV8_4aOps])
2846 Str += "ARMv8.4a";
2847 else if (FBS[AArch64::HasV8_5aOps])
2848 Str += "ARMv8.5a";
2849 else {
2850 auto ext = std::find_if(std::begin(ExtensionMap),
2851 std::end(ExtensionMap),
2852 [&](const Extension& e)
2853 // Use & in case multiple features are enabled
2854 { return (FBS & e.Features) != FeatureBitset(); }
2855 );
2856
2857 Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2858 }
2859}
2860
2861void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2862 SMLoc S) {
2863 const uint16_t Op2 = Encoding & 7;
2864 const uint16_t Cm = (Encoding & 0x78) >> 3;
2865 const uint16_t Cn = (Encoding & 0x780) >> 7;
2866 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2867
2868 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2869
2870 Operands.push_back(
2871 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2872 Operands.push_back(
2873 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2874 Operands.push_back(
2875 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2876 Expr = MCConstantExpr::create(Op2, getContext());
2877 Operands.push_back(
2878 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2879}
2880
2881/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2882/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2883bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2884 OperandVector &Operands) {
2885 if (Name.find('.') != StringRef::npos)
2886 return TokError("invalid operand");
2887
2888 Mnemonic = Name;
2889 Operands.push_back(
2890 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2891
2892 MCAsmParser &Parser = getParser();
2893 const AsmToken &Tok = Parser.getTok();
2894 StringRef Op = Tok.getString();
2895 SMLoc S = Tok.getLoc();
2896
2897 if (Mnemonic == "ic") {
2898 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2899 if (!IC)
2900 return TokError("invalid operand for IC instruction");
2901 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2902 std::string Str("IC " + std::string(IC->Name) + " requires ");
2903 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2904 return TokError(Str.c_str());
2905 }
2906 createSysAlias(IC->Encoding, Operands, S);
2907 } else if (Mnemonic == "dc") {
2908 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2909 if (!DC)
2910 return TokError("invalid operand for DC instruction");
2911 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2912 std::string Str("DC " + std::string(DC->Name) + " requires ");
2913 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2914 return TokError(Str.c_str());
2915 }
2916 createSysAlias(DC->Encoding, Operands, S);
2917 } else if (Mnemonic == "at") {
2918 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2919 if (!AT)
2920 return TokError("invalid operand for AT instruction");
2921 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2922 std::string Str("AT " + std::string(AT->Name) + " requires ");
2923 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2924 return TokError(Str.c_str());
2925 }
2926 createSysAlias(AT->Encoding, Operands, S);
2927 } else if (Mnemonic == "tlbi") {
2928 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2929 if (!TLBI)
2930 return TokError("invalid operand for TLBI instruction");
2931 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2932 std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2933 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2934 return TokError(Str.c_str());
2935 }
2936 createSysAlias(TLBI->Encoding, Operands, S);
2937 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
2938 const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
2939 if (!PRCTX)
2940 return TokError("invalid operand for prediction restriction instruction");
2941 else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
2942 std::string Str(
2943 Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
2944 setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
2945 return TokError(Str.c_str());
2946 }
2947 uint16_t PRCTX_Op2 =
2948 Mnemonic == "cfp" ? 4 :
2949 Mnemonic == "dvp" ? 5 :
2950 Mnemonic == "cpp" ? 7 :
2951 0;
2952 assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction")((PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction"
) ? static_cast<void> (0) : __assert_fail ("PRCTX_Op2 && \"Invalid mnemonic for prediction restriction instruction\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 2952, __PRETTY_FUNCTION__))
;
2953 createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
2954 }
2955
2956 Parser.Lex(); // Eat operand.
2957
2958 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2959 bool HasRegister = false;
2960
2961 // Check for the optional register operand.
2962 if (parseOptionalToken(AsmToken::Comma)) {
2963 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2964 return TokError("expected register operand");
2965 HasRegister = true;
2966 }
2967
2968 if (ExpectRegister && !HasRegister)
2969 return TokError("specified " + Mnemonic + " op requires a register");
2970 else if (!ExpectRegister && HasRegister)
2971 return TokError("specified " + Mnemonic + " op does not use a register");
2972
2973 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2974 return true;
2975
2976 return false;
2977}
2978
2979OperandMatchResultTy
2980AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2981 MCAsmParser &Parser = getParser();
2982 const AsmToken &Tok = Parser.getTok();
2983
2984 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2985 TokError("'csync' operand expected");
2986 return MatchOperand_ParseFail;
2987 // Can be either a #imm style literal or an option name
2988 } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2989 // Immediate operand.
2990 const MCExpr *ImmVal;
2991 SMLoc ExprLoc = getLoc();
2992 if (getParser().parseExpression(ImmVal))
2993 return MatchOperand_ParseFail;
2994 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2995 if (!MCE) {
2996 Error(ExprLoc, "immediate value expected for barrier operand");
2997 return MatchOperand_ParseFail;
2998 }
2999 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
3000 Error(ExprLoc, "barrier operand out of range");
3001 return MatchOperand_ParseFail;
3002 }
3003 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
3004 Operands.push_back(AArch64Operand::CreateBarrier(
3005 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
3006 return MatchOperand_Success;
3007 }
3008
3009 if (Tok.isNot(AsmToken::Identifier)) {
3010 TokError("invalid operand for instruction");
3011 return MatchOperand_ParseFail;
3012 }
3013
3014 auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
3015 // The only valid named option for ISB is 'sy'
3016 auto DB = AArch64DB::lookupDBByName(Tok.getString());
3017 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3018 TokError("'sy' or #imm operand expected");
3019 return MatchOperand_ParseFail;
3020 // The only valid named option for TSB is 'csync'
3021 } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3022 TokError("'csync' operand expected");
3023 return MatchOperand_ParseFail;
3024 } else if (!DB && !TSB) {
3025 TokError("invalid barrier option name");
3026 return MatchOperand_ParseFail;
3027 }
3028
3029 Operands.push_back(AArch64Operand::CreateBarrier(
3030 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
3031 Parser.Lex(); // Consume the option
3032
3033 return MatchOperand_Success;
3034}
3035
3036OperandMatchResultTy
3037AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3038 MCAsmParser &Parser = getParser();
3039 const AsmToken &Tok = Parser.getTok();
3040
3041 if (Tok.isNot(AsmToken::Identifier))
3042 return MatchOperand_NoMatch;
3043
3044 int MRSReg, MSRReg;
3045 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3046 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3047 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3048 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3049 } else
3050 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3051
3052 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3053 unsigned PStateImm = -1;
3054 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3055 PStateImm = PState->Encoding;
3056
3057 Operands.push_back(
3058 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3059 PStateImm, getContext()));
3060 Parser.Lex(); // Eat identifier
3061
3062 return MatchOperand_Success;
3063}
3064
3065/// tryParseNeonVectorRegister - Parse a vector register operand.
3066bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3067 MCAsmParser &Parser = getParser();
3068 if (Parser.getTok().isNot(AsmToken::Identifier))
3069 return true;
3070
3071 SMLoc S = getLoc();
3072 // Check for a vector register specifier first.
3073 StringRef Kind;
3074 unsigned Reg;
3075 OperandMatchResultTy Res =
3076 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3077 if (Res != MatchOperand_Success)
3078 return true;
3079
3080 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3081 if (!KindRes)
3082 return true;
3083
3084 unsigned ElementWidth = KindRes->second;
3085 Operands.push_back(
3086 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3087 S, getLoc(), getContext()));
3088
3089 // If there was an explicit qualifier, that goes on as a literal text
3090 // operand.
3091 if (!Kind.empty())
3092 Operands.push_back(
3093 AArch64Operand::CreateToken(Kind, false, S, getContext()));
3094
3095 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3096}
3097
3098OperandMatchResultTy
3099AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3100 SMLoc SIdx = getLoc();
3101 if (parseOptionalToken(AsmToken::LBrac)) {
3102 const MCExpr *ImmVal;
3103 if (getParser().parseExpression(ImmVal))
3104 return MatchOperand_NoMatch;
3105 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3106 if (!MCE) {
3107 TokError("immediate value expected for vector index");
3108 return MatchOperand_ParseFail;;
3109 }
3110
3111 SMLoc E = getLoc();
3112
3113 if (parseToken(AsmToken::RBrac, "']' expected"))
3114 return MatchOperand_ParseFail;;
3115
3116 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3117 E, getContext()));
3118 return MatchOperand_Success;
3119 }
3120
3121 return MatchOperand_NoMatch;
3122}
3123
3124// tryParseVectorRegister - Try to parse a vector register name with
3125// optional kind specifier. If it is a register specifier, eat the token
3126// and return it.
3127OperandMatchResultTy
3128AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3129 RegKind MatchKind) {
3130 MCAsmParser &Parser = getParser();
3131 const AsmToken &Tok = Parser.getTok();
3132
3133 if (Tok.isNot(AsmToken::Identifier))
3134 return MatchOperand_NoMatch;
3135
3136 StringRef Name = Tok.getString();
3137 // If there is a kind specifier, it's separated from the register name by
3138 // a '.'.
3139 size_t Start = 0, Next = Name.find('.');
3140 StringRef Head = Name.slice(Start, Next);
3141 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3142
3143 if (RegNum) {
3144 if (Next != StringRef::npos) {
3145 Kind = Name.slice(Next, StringRef::npos);
3146 if (!isValidVectorKind(Kind, MatchKind)) {
3147 TokError("invalid vector kind qualifier");
3148 return MatchOperand_ParseFail;
3149 }
3150 }
3151 Parser.Lex(); // Eat the register token.
3152
3153 Reg = RegNum;
3154 return MatchOperand_Success;
3155 }
3156
3157 return MatchOperand_NoMatch;
3158}
3159
3160/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3161OperandMatchResultTy
3162AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3163 // Check for a SVE predicate register specifier first.
3164 const SMLoc S = getLoc();
3165 StringRef Kind;
3166 unsigned RegNum;
3167 auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3168 if (Res != MatchOperand_Success)
3169 return Res;
3170
3171 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3172 if (!KindRes)
3173 return MatchOperand_NoMatch;
3174
3175 unsigned ElementWidth = KindRes->second;
3176 Operands.push_back(AArch64Operand::CreateVectorReg(
3177 RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3178 getLoc(), getContext()));
3179
3180 // Not all predicates are followed by a '/m' or '/z'.
3181 MCAsmParser &Parser = getParser();
3182 if (Parser.getTok().isNot(AsmToken::Slash))
3183 return MatchOperand_Success;
3184
3185 // But when they do they shouldn't have an element type suffix.
3186 if (!Kind.empty()) {
3187 Error(S, "not expecting size suffix");
3188 return MatchOperand_ParseFail;
3189 }
3190
3191 // Add a literal slash as operand
3192 Operands.push_back(
3193 AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3194
3195 Parser.Lex(); // Eat the slash.
3196
3197 // Zeroing or merging?
3198 auto Pred = Parser.getTok().getString().lower();
3199 if (Pred != "z" && Pred != "m") {
3200 Error(getLoc(), "expecting 'm' or 'z' predication");
3201 return MatchOperand_ParseFail;
3202 }
3203
3204 // Add zero/merge token.
3205 const char *ZM = Pred == "z" ? "z" : "m";
3206 Operands.push_back(
3207 AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3208
3209 Parser.Lex(); // Eat zero/merge token.
3210 return MatchOperand_Success;
3211}
3212
3213/// parseRegister - Parse a register operand.
3214bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3215 // Try for a Neon vector register.
3216 if (!tryParseNeonVectorRegister(Operands))
3217 return false;
3218
3219 // Otherwise try for a scalar register.
3220 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3221 return false;
3222
3223 return true;
3224}
3225
3226bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3227 MCAsmParser &Parser = getParser();
3228 bool HasELFModifier = false;
3229 AArch64MCExpr::VariantKind RefKind;
3230
3231 if (parseOptionalToken(AsmToken::Colon)) {
5
Assuming the condition is true
6
Taking true branch
3232 HasELFModifier = true;
3233
3234 if (Parser.getTok().isNot(AsmToken::Identifier))
7
Taking true branch
3235 return TokError("expect relocation specifier in operand after ':'");
8
Returning without writing to 'ImmVal'
3236
3237 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3238 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3239 .Case("lo12", AArch64MCExpr::VK_LO12)
3240 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3241 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3242 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3243 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3244 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3245 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3246 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3247 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3248 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3249 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3250 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3251 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3252 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3253 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3254 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3255 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3256 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3257 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3258 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3259 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3260 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3261 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3262 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3263 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3264 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3265 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3266 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3267 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3268 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3269 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3270 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3271 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3272 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3273 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3274 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3275 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3276 .Default(AArch64MCExpr::VK_INVALID);
3277
3278 if (RefKind == AArch64MCExpr::VK_INVALID)
3279 return TokError("expect relocation specifier in operand after ':'");
3280
3281 Parser.Lex(); // Eat identifier
3282
3283 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3284 return true;
3285 }
3286
3287 if (getParser().parseExpression(ImmVal))
3288 return true;
3289
3290 if (HasELFModifier)
3291 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3292
3293 return false;
3294}
3295
3296template <RegKind VectorKind>
3297OperandMatchResultTy
3298AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3299 bool ExpectMatch) {
3300 MCAsmParser &Parser = getParser();
3301 if (!Parser.getTok().is(AsmToken::LCurly))
3302 return MatchOperand_NoMatch;
3303
3304 // Wrapper around parse function
3305 auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3306 bool NoMatchIsError) {
3307 auto RegTok = Parser.getTok();
3308 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3309 if (ParseRes == MatchOperand_Success) {
3310 if (parseVectorKind(Kind, VectorKind))
3311 return ParseRes;
3312 llvm_unreachable("Expected a valid vector kind")::llvm::llvm_unreachable_internal("Expected a valid vector kind"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3312)
;
3313 }
3314
3315 if (RegTok.isNot(AsmToken::Identifier) ||
3316 ParseRes == MatchOperand_ParseFail ||
3317 (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3318 Error(Loc, "vector register expected");
3319 return MatchOperand_ParseFail;
3320 }
3321
3322 return MatchOperand_NoMatch;
3323 };
3324
3325 SMLoc S = getLoc();
3326 auto LCurly = Parser.getTok();
3327 Parser.Lex(); // Eat left bracket token.
3328
3329 StringRef Kind;
3330 unsigned FirstReg;
3331 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3332
3333 // Put back the original left bracket if there was no match, so that
3334 // different types of list-operands can be matched (e.g. SVE, Neon).
3335 if (ParseRes == MatchOperand_NoMatch)
3336 Parser.getLexer().UnLex(LCurly);
3337
3338 if (ParseRes != MatchOperand_Success)
3339 return ParseRes;
3340
3341 int64_t PrevReg = FirstReg;
3342 unsigned Count = 1;
3343
3344 if (parseOptionalToken(AsmToken::Minus)) {
3345 SMLoc Loc = getLoc();
3346 StringRef NextKind;
3347
3348 unsigned Reg;
3349 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3350 if (ParseRes != MatchOperand_Success)
3351 return ParseRes;
3352
3353 // Any Kind suffices must match on all regs in the list.
3354 if (Kind != NextKind) {
3355 Error(Loc, "mismatched register size suffix");
3356 return MatchOperand_ParseFail;
3357 }
3358
3359 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3360
3361 if (Space == 0 || Space > 3) {
3362 Error(Loc, "invalid number of vectors");
3363 return MatchOperand_ParseFail;
3364 }
3365
3366 Count += Space;
3367 }
3368 else {
3369 while (parseOptionalToken(AsmToken::Comma)) {
3370 SMLoc Loc = getLoc();
3371 StringRef NextKind;
3372 unsigned Reg;
3373 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3374 if (ParseRes != MatchOperand_Success)
3375 return ParseRes;
3376
3377 // Any Kind suffices must match on all regs in the list.
3378 if (Kind != NextKind) {
3379 Error(Loc, "mismatched register size suffix");
3380 return MatchOperand_ParseFail;
3381 }
3382
3383 // Registers must be incremental (with wraparound at 31)
3384 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3385 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3386 Error(Loc, "registers must be sequential");
3387 return MatchOperand_ParseFail;
3388 }
3389
3390 PrevReg = Reg;
3391 ++Count;
3392 }
3393 }
3394
3395 if (parseToken(AsmToken::RCurly, "'}' expected"))
3396 return MatchOperand_ParseFail;
3397
3398 if (Count > 4) {
3399 Error(S, "invalid number of vectors");
3400 return MatchOperand_ParseFail;
3401 }
3402
3403 unsigned NumElements = 0;
3404 unsigned ElementWidth = 0;
3405 if (!Kind.empty()) {
3406 if (const auto &VK = parseVectorKind(Kind, VectorKind))
3407 std::tie(NumElements, ElementWidth) = *VK;
3408 }
3409
3410 Operands.push_back(AArch64Operand::CreateVectorList(
3411 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3412 getContext()));
3413
3414 return MatchOperand_Success;
3415}
3416
3417/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3418bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3419 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3420 if (ParseRes != MatchOperand_Success)
3421 return true;
3422
3423 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3424}
3425
3426OperandMatchResultTy
3427AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3428 SMLoc StartLoc = getLoc();
3429
3430 unsigned RegNum;
3431 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3432 if (Res != MatchOperand_Success)
3433 return Res;
3434
3435 if (!parseOptionalToken(AsmToken::Comma)) {
3436 Operands.push_back(AArch64Operand::CreateReg(
3437 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3438 return MatchOperand_Success;
3439 }
3440
3441 parseOptionalToken(AsmToken::Hash);
3442
3443 if (getParser().getTok().isNot(AsmToken::Integer)) {
3444 Error(getLoc(), "index must be absent or #0");
3445 return MatchOperand_ParseFail;
3446 }
3447
3448 const MCExpr *ImmVal;
3449 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3450 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3451 Error(getLoc(), "index must be absent or #0");
3452 return MatchOperand_ParseFail;
3453 }
3454
3455 Operands.push_back(AArch64Operand::CreateReg(
3456 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3457 return MatchOperand_Success;
3458}
3459
3460template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3461OperandMatchResultTy
3462AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3463 SMLoc StartLoc = getLoc();
3464
3465 unsigned RegNum;
3466 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3467 if (Res != MatchOperand_Success)
3468 return Res;
3469
3470 // No shift/extend is the default.
3471 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3472 Operands.push_back(AArch64Operand::CreateReg(
3473 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3474 return MatchOperand_Success;
3475 }
3476
3477 // Eat the comma
3478 getParser().Lex();
3479
3480 // Match the shift
3481 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3482 Res = tryParseOptionalShiftExtend(ExtOpnd);
3483 if (Res != MatchOperand_Success)
3484 return Res;
3485
3486 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3487 Operands.push_back(AArch64Operand::CreateReg(
3488 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3489 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3490 Ext->hasShiftExtendAmount()));
3491
3492 return MatchOperand_Success;
3493}
3494
3495bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3496 MCAsmParser &Parser = getParser();
3497
3498 // Some SVE instructions have a decoration after the immediate, i.e.
3499 // "mul vl". We parse them here and add tokens, which must be present in the
3500 // asm string in the tablegen instruction.
3501 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3502 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3503 if (!Parser.getTok().getString().equals_lower("mul") ||
3504 !(NextIsVL || NextIsHash))
3505 return true;
3506
3507 Operands.push_back(
3508 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3509 Parser.Lex(); // Eat the "mul"
3510
3511 if (NextIsVL) {
3512 Operands.push_back(
3513 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3514 Parser.Lex(); // Eat the "vl"
3515 return false;
3516 }
3517
3518 if (NextIsHash) {
3519 Parser.Lex(); // Eat the #
3520 SMLoc S = getLoc();
3521
3522 // Parse immediate operand.
3523 const MCExpr *ImmVal;
3524 if (!Parser.parseExpression(ImmVal))
3525 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3526 Operands.push_back(AArch64Operand::CreateImm(
3527 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3528 getContext()));
3529 return MatchOperand_Success;
3530 }
3531 }
3532
3533 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3534}
3535
3536/// parseOperand - Parse a arm instruction operand. For now this parses the
3537/// operand regardless of the mnemonic.
3538bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3539 bool invertCondCode) {
3540 MCAsmParser &Parser = getParser();
3541
3542 OperandMatchResultTy ResTy =
3543 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3544
3545 // Check if the current operand has a custom associated parser, if so, try to
3546 // custom parse the operand, or fallback to the general approach.
3547 if (ResTy == MatchOperand_Success)
3548 return false;
3549 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3550 // there was a match, but an error occurred, in which case, just return that
3551 // the operand parsing failed.
3552 if (ResTy == MatchOperand_ParseFail)
3553 return true;
3554
3555 // Nothing custom, so do general case parsing.
3556 SMLoc S, E;
3557 switch (getLexer().getKind()) {
3558 default: {
3559 SMLoc S = getLoc();
3560 const MCExpr *Expr;
3561 if (parseSymbolicImmVal(Expr))
3562 return Error(S, "invalid operand");
3563
3564 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3565 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3566 return false;
3567 }
3568 case AsmToken::LBrac: {
3569 SMLoc Loc = Parser.getTok().getLoc();
3570 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3571 getContext()));
3572 Parser.Lex(); // Eat '['
3573
3574 // There's no comma after a '[', so we can parse the next operand
3575 // immediately.
3576 return parseOperand(Operands, false, false);
3577 }
3578 case AsmToken::LCurly:
3579 return parseNeonVectorList(Operands);
3580 case AsmToken::Identifier: {
3581 // If we're expecting a Condition Code operand, then just parse that.
3582 if (isCondCode)
3583 return parseCondCode(Operands, invertCondCode);
3584
3585 // If it's a register name, parse it.
3586 if (!parseRegister(Operands))
3587 return false;
3588
3589 // See if this is a "mul vl" decoration or "mul #<int>" operand used
3590 // by SVE instructions.
3591 if (!parseOptionalMulOperand(Operands))
3592 return false;
3593
3594 // This could be an optional "shift" or "extend" operand.
3595 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3596 // We can only continue if no tokens were eaten.
3597 if (GotShift != MatchOperand_NoMatch)
3598 return GotShift;
3599
3600 // This was not a register so parse other operands that start with an
3601 // identifier (like labels) as expressions and create them as immediates.
3602 const MCExpr *IdVal;
3603 S = getLoc();
3604 if (getParser().parseExpression(IdVal))
3605 return true;
3606 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3607 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3608 return false;
3609 }
3610 case AsmToken::Integer:
3611 case AsmToken::Real:
3612 case AsmToken::Hash: {
3613 // #42 -> immediate.
3614 S = getLoc();
3615
3616 parseOptionalToken(AsmToken::Hash);
3617
3618 // Parse a negative sign
3619 bool isNegative = false;
3620 if (Parser.getTok().is(AsmToken::Minus)) {
3621 isNegative = true;
3622 // We need to consume this token only when we have a Real, otherwise
3623 // we let parseSymbolicImmVal take care of it
3624 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3625 Parser.Lex();
3626 }
3627
3628 // The only Real that should come through here is a literal #0.0 for
3629 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3630 // so convert the value.
3631 const AsmToken &Tok = Parser.getTok();
3632 if (Tok.is(AsmToken::Real)) {
3633 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3634 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3635 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3636 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3637 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3638 return TokError("unexpected floating point literal");
3639 else if (IntVal != 0 || isNegative)
3640 return TokError("expected floating-point constant #0.0");
3641 Parser.Lex(); // Eat the token.
3642
3643 Operands.push_back(
3644 AArch64Operand::CreateToken("#0", false, S, getContext()));
3645 Operands.push_back(
3646 AArch64Operand::CreateToken(".0", false, S, getContext()));
3647 return false;
3648 }
3649
3650 const MCExpr *ImmVal;
3651 if (parseSymbolicImmVal(ImmVal))
3652 return true;
3653
3654 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3655 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3656 return false;
3657 }
3658 case AsmToken::Equal: {
3659 SMLoc Loc = getLoc();
3660 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3661 return TokError("unexpected token in operand");
3662 Parser.Lex(); // Eat '='
3663 const MCExpr *SubExprVal;
3664 if (getParser().parseExpression(SubExprVal))
3665 return true;
3666
3667 if (Operands.size() < 2 ||
3668 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3669 return Error(Loc, "Only valid when first operand is register");
3670
3671 bool IsXReg =
3672 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3673 Operands[1]->getReg());
3674
3675 MCContext& Ctx = getContext();
3676 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3677 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3678 if (isa<MCConstantExpr>(SubExprVal)) {
3679 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3680 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3681 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3682 ShiftAmt += 16;
3683 Imm >>= 16;
3684 }
3685 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3686 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3687 Operands.push_back(AArch64Operand::CreateImm(
3688 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3689 if (ShiftAmt)
3690 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3691 ShiftAmt, true, S, E, Ctx));
3692 return false;
3693 }
3694 APInt Simm = APInt(64, Imm << ShiftAmt);
3695 // check if the immediate is an unsigned or signed 32-bit int for W regs
3696 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3697 return Error(Loc, "Immediate too large for register");
3698 }
3699 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3700 const MCExpr *CPLoc =
3701 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3702 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3703 return false;
3704 }
3705 }
3706}
3707
3708bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3709 const MCParsedAsmOperand &Op2) const {
3710 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3711 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3712 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3713 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3714 return MCTargetAsmParser::regsEqual(Op1, Op2);
3715
3716 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3717, __PRETTY_FUNCTION__))
3717 "Testing equality of non-scalar registers not supported")((AOp1.isScalarReg() && AOp2.isScalarReg() &&
"Testing equality of non-scalar registers not supported") ? static_cast
<void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3717, __PRETTY_FUNCTION__))
;
3718
3719 // Check if a registers match their sub/super register classes.
3720 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3721 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3722 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3723 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3724 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3725 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3726 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3727 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3728
3729 return false;
3730}
3731
3732/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3733/// operands.
// Returns true on error (the statement is consumed/abandoned), false when the
// mnemonic and all operands were pushed onto Operands successfully.
3734 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3735 StringRef Name, SMLoc NameLoc,
3736 OperandVector &Operands) {
3737 MCAsmParser &Parser = getParser();
// Canonicalize legacy ARM-style conditional-branch spellings ("beq") into the
// architectural "b.<cond>" form so the suffix-splitting logic below only has
// to handle one shape.
3738 Name = StringSwitch<StringRef>(Name.lower())
3739 .Case("beq", "b.eq")
3740 .Case("bne", "b.ne")
3741 .Case("bhs", "b.hs")
3742 .Case("bcs", "b.cs")
3743 .Case("blo", "b.lo")
3744 .Case("bcc", "b.cc")
3745 .Case("bmi", "b.mi")
3746 .Case("bpl", "b.pl")
3747 .Case("bvs", "b.vs")
3748 .Case("bvc", "b.vc")
3749 .Case("bhi", "b.hi")
3750 .Case("bls", "b.ls")
3751 .Case("bge", "b.ge")
3752 .Case("blt", "b.lt")
3753 .Case("bgt", "b.gt")
3754 .Case("ble", "b.le")
3755 .Case("bal", "b.al")
3756 .Case("bnv", "b.nv")
3757 .Default(Name)
3758
3759 // First check for the AArch64-specific .req directive.
3760 if (Parser.getTok().is(AsmToken::Identifier) &&
3761 Parser.getTok().getIdentifier() == ".req") {
3762 parseDirectiveReq(Name, NameLoc);
3763 // We always return 'error' for this, as we're done with this
3764 // statement and don't need to match the 'instruction'.
3765 return true;
3766 }
3767
3768 // Create the leading tokens for the mnemonic, split by '.' characters.
3769 size_t Start = 0, Next = Name.find('.');
3770 StringRef Head = Name.slice(Start, Next);
3771
3772 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
3773 // the SYS instruction.
3774 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
3775 Head == "cfp" || Head == "dvp" || Head == "cpp")
3776 return parseSysAlias(Head, NameLoc, Operands);
3777
3778 Operands.push_back(
3779 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3780 Mnemonic = Head;
3781
3782 // Handle condition codes for a branch mnemonic
3783 if (Head == "b" && Next != StringRef::npos) {
3784 Start = Next;
3785 Next = Name.find('.', Start + 1);
// Note the '+ 1' here: the condition-code text excludes the '.' separator,
// unlike the generic suffix loop below.
3786 Head = Name.slice(Start + 1, Next);
3787
// Point the diagnostic at the suffix itself, not at the mnemonic start.
3788 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3789 (Head.data() - Name.data()));
3790 AArch64CC::CondCode CC = parseCondCodeString(Head);
3791 if (CC == AArch64CC::Invalid)
3792 return Error(SuffixLoc, "invalid condition code");
3793 Operands.push_back(
3794 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3795 Operands.push_back(
3796 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3797 }
3798
3799 // Add the remaining tokens in the mnemonic.
3800 while (Next != StringRef::npos) {
3801 Start = Next;
3802 Next = Name.find('.', Start + 1);
// Each remaining suffix token keeps its leading '.' (slice from Start, not
// Start + 1), which is what the matcher tables expect.
3803 Head = Name.slice(Start, Next);
3804 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3805 (Head.data() - Name.data()) + 1);
3806 Operands.push_back(
3807 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3808 }
3809
3810 // Conditional compare instructions have a Condition Code operand, which needs
3811 // to be parsed and an immediate operand created.
3812 bool condCodeFourthOperand =
3813 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3814 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3815 Head == "csinc" || Head == "csinv" || Head == "csneg");
3816
3817 // These instructions are aliases to some of the conditional select
3818 // instructions. However, the condition code is inverted in the aliased
3819 // instruction.
3820 //
3821 // FIXME: Is this the correct way to handle these? Or should the parser
3822 // generate the aliased instructions directly?
3823 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3824 bool condCodeThirdOperand =
3825 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3826
3827 // Read the remaining operands.
3828 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3829
// N is the 1-based operand position, used so the condition-code flags above
// fire only for the exact operand slot each mnemonic family expects.
3830 unsigned N = 1;
3831 do {
3832 // Parse and remember the operand.
3833 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3834 (N == 3 && condCodeThirdOperand) ||
3835 (N == 2 && condCodeSecondOperand),
3836 condCodeSecondOperand || condCodeThirdOperand)) {
3837 return true;
3838 }
3839
3840 // After successfully parsing some operands there are two special cases to
3841 // consider (i.e. notional operands not separated by commas). Both are due
3842 // to memory specifiers:
3843 // + An RBrac will end an address for load/store/prefetch
3844 // + An '!' will indicate a pre-indexed operation.
3845 //
3846 // It's someone else's responsibility to make sure these tokens are sane
3847 // in the given context!
3848
3849 SMLoc RLoc = Parser.getTok().getLoc();
3850 if (parseOptionalToken(AsmToken::RBrac))
3851 Operands.push_back(
3852 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3853 SMLoc ELoc = Parser.getTok().getLoc();
3854 if (parseOptionalToken(AsmToken::Exclaim))
3855 Operands.push_back(
3856 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3857
3858 ++N;
3859 } while (parseOptionalToken(AsmToken::Comma));
3860 }
3861
3862 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3863 return true;
3864
3865 return false;
3866}
3867
// isMatchingOrAlias - Return true if the SVE register ZReg is the Z register
// that architecturally aliases Reg, where Reg may name the register through
// any of its B/H/S/D/Q (or Z) class views.
// NOTE(review): the arithmetic assumes the tablegen-generated enums lay out
// each of the B0.., H0.., S0.., D0.., Q0.. and Z0.. ranges contiguously and
// in the same order, so (Reg - <Class>0) recovers the register index —
// confirm against AArch64GenRegisterInfo.inc.
// NOTE(review): for a Reg outside those classes (e.g. a GPR), one of the
// subtractions could coincidentally equal ZReg's index; callers in
// validateInstruction pass arbitrary register operands — verify this cannot
// produce false "alias" matches in practice.
3868 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3869 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31
)) ? static_cast<void> (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3869, __PRETTY_FUNCTION__))
;
3870 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3871 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3872 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3873 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3874 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3875 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3876}
3877
3878// FIXME: This entire function is a giant hack to provide us with decent
3879// operand range validation/diagnostics until TableGen/MC can be extended
3880// to support autogeneration of this kind of validation.
// validateInstruction - Post-match, pre-emit checks: movprfx pairing rules,
// unpredictable writeback/register-overlap combinations, and symbolic
// immediate restrictions.  Loc holds per-operand source locations for
// diagnostics.  Returns true (after emitting an error) to reject Inst.
3881 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3882 SmallVectorImpl<SMLoc> &Loc) {
3883 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3884 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3885
3886 // A prefix only applies to the instruction following it. Here we extract
3887 // prefix information for the next instruction before validating the current
3888 // one so that in the case of failure we don't erroneously continue using the
3889 // current prefix.
3890 PrefixInfo Prefix = NextPrefix;
3891 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3892
3893 // Before validating the instruction in isolation we run through the rules
3894 // applicable when it follows a prefix instruction.
3895 // NOTE: brk & hlt can be prefixed but require no additional validation.
3896 if (Prefix.isActive() &&
3897 (Inst.getOpcode() != AArch64::BRK) &&
3898 (Inst.getOpcode() != AArch64::HLT)) {
3899
3900 // Prefixed instructions must have a destructive operand.
3901 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3902 AArch64::NotDestructive)
3903 return Error(IDLoc, "instruction is unpredictable when following a"
3904 " movprfx, suggest replacing movprfx with mov");
3905
3906 // Destination operands must match.
3907 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3908 return Error(Loc[0], "instruction is unpredictable when following a"
3909 " movprfx writing to a different destination");
3910
3911 // Destination operand must not be used in any other location.
// Tied operands are exempt: they are the destructive source, which is by
// definition the same register as the destination.
3912 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3913 if (Inst.getOperand(i).isReg() &&
3914 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3915 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3916 return Error(Loc[0], "instruction is unpredictable when following a"
3917 " movprfx and destination also used as non-destructive"
3918 " source");
3919 }
3920
3921 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3922 if (Prefix.isPredicated()) {
3923 int PgIdx = -1;
3924
3925 // Find the instruction's general predicate (first PPR operand).
3926 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3927 if (Inst.getOperand(i).isReg() &&
3928 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3929 PgIdx = i;
3930 break;
3931 }
3932
3933 // Instruction must be predicated if the movprfx is predicated.
3934 if (PgIdx == -1 ||
3935 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3936 return Error(IDLoc, "instruction is unpredictable when following a"
3937 " predicated movprfx, suggest using unpredicated movprfx");
3938
3939 // Instruction must use same general predicate as the movprfx.
3940 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3941 return Error(IDLoc, "instruction is unpredictable when following a"
3942 " predicated movprfx using a different general predicate");
3943
3944 // Instruction element type must match the movprfx.
3945 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3946 return Error(IDLoc, "instruction is unpredictable when following a"
3947 " predicated movprfx with a different element size");
3948 }
3949 }
3950
3951 // Check for indexed addressing modes w/ the base register being the
3952 // same as a destination/source register or pair load where
3953 // the Rt == Rt2. All of those are undefined behaviour.
3954 switch (Inst.getOpcode()) {
3955 case AArch64::LDPSWpre:
3956 case AArch64::LDPWpost:
3957 case AArch64::LDPWpre:
3958 case AArch64::LDPXpost:
3959 case AArch64::LDPXpre: {
// Writeback forms: operand 0 is the base-register def, Rt/Rt2/Rn follow.
3960 unsigned Rt = Inst.getOperand(1).getReg();
3961 unsigned Rt2 = Inst.getOperand(2).getReg();
3962 unsigned Rn = Inst.getOperand(3).getReg();
3963 if (RI->isSubRegisterEq(Rn, Rt))
3964 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3965 "is also a destination");
3966 if (RI->isSubRegisterEq(Rn, Rt2))
3967 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3968 "is also a destination");
// NOTE(review): after this fall-through, the next case re-reads operands 0
// and 1 as Rt/Rt2 — but for these writeback forms those operands are
// (base-def, Rt), so the Rt == Rt2 test below effectively re-checks the
// base/Rt overlap already diagnosed above rather than Rt against Rt2.
// Confirm intent upstream.
3969 LLVM_FALLTHROUGH[[clang::fallthrough]];
3970 }
3971 case AArch64::LDPDi:
3972 case AArch64::LDPQi:
3973 case AArch64::LDPSi:
3974 case AArch64::LDPSWi:
3975 case AArch64::LDPWi:
3976 case AArch64::LDPXi: {
3977 unsigned Rt = Inst.getOperand(0).getReg();
3978 unsigned Rt2 = Inst.getOperand(1).getReg();
3979 if (Rt == Rt2)
3980 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3981 break;
3982 }
3983 case AArch64::LDPDpost:
3984 case AArch64::LDPDpre:
3985 case AArch64::LDPQpost:
3986 case AArch64::LDPQpre:
3987 case AArch64::LDPSpost:
3988 case AArch64::LDPSpre:
3989 case AArch64::LDPSWpost: {
// FP-register writeback pairs (and LDPSWpost): base overlap with an FP Rt
// is impossible, so only Rt == Rt2 needs checking.
3990 unsigned Rt = Inst.getOperand(1).getReg();
3991 unsigned Rt2 = Inst.getOperand(2).getReg();
3992 if (Rt == Rt2)
3993 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3994 break;
3995 }
3996 case AArch64::STPDpost:
3997 case AArch64::STPDpre:
3998 case AArch64::STPQpost:
3999 case AArch64::STPQpre:
4000 case AArch64::STPSpost:
4001 case AArch64::STPSpre:
4002 case AArch64::STPWpost:
4003 case AArch64::STPWpre:
4004 case AArch64::STPXpost:
4005 case AArch64::STPXpre: {
4006 unsigned Rt = Inst.getOperand(1).getReg();
4007 unsigned Rt2 = Inst.getOperand(2).getReg();
4008 unsigned Rn = Inst.getOperand(3).getReg();
4009 if (RI->isSubRegisterEq(Rn, Rt))
4010 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4011 "is also a source");
4012 if (RI->isSubRegisterEq(Rn, Rt2))
4013 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4014 "is also a source");
4015 break;
4016 }
4017 case AArch64::LDRBBpre:
4018 case AArch64::LDRBpre:
4019 case AArch64::LDRHHpre:
4020 case AArch64::LDRHpre:
4021 case AArch64::LDRSBWpre:
4022 case AArch64::LDRSBXpre:
4023 case AArch64::LDRSHWpre:
4024 case AArch64::LDRSHXpre:
4025 case AArch64::LDRSWpre:
4026 case AArch64::LDRWpre:
4027 case AArch64::LDRXpre:
4028 case AArch64::LDRBBpost:
4029 case AArch64::LDRBpost:
4030 case AArch64::LDRHHpost:
4031 case AArch64::LDRHpost:
4032 case AArch64::LDRSBWpost:
4033 case AArch64::LDRSBXpost:
4034 case AArch64::LDRSHWpost:
4035 case AArch64::LDRSHXpost:
4036 case AArch64::LDRSWpost:
4037 case AArch64::LDRWpost:
4038 case AArch64::LDRXpost: {
4039 unsigned Rt = Inst.getOperand(1).getReg();
4040 unsigned Rn = Inst.getOperand(2).getReg();
4041 if (RI->isSubRegisterEq(Rn, Rt))
4042 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4043 "is also a source");
4044 break;
4045 }
4046 case AArch64::STRBBpost:
4047 case AArch64::STRBpost:
4048 case AArch64::STRHHpost:
4049 case AArch64::STRHpost:
4050 case AArch64::STRWpost:
4051 case AArch64::STRXpost:
4052 case AArch64::STRBBpre:
4053 case AArch64::STRBpre:
4054 case AArch64::STRHHpre:
4055 case AArch64::STRHpre:
4056 case AArch64::STRWpre:
4057 case AArch64::STRXpre: {
4058 unsigned Rt = Inst.getOperand(1).getReg();
4059 unsigned Rn = Inst.getOperand(2).getReg();
4060 if (RI->isSubRegisterEq(Rn, Rt))
4061 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4062 "is also a source");
4063 break;
4064 }
4065 case AArch64::STXRB:
4066 case AArch64::STXRH:
4067 case AArch64::STXRW:
4068 case AArch64::STXRX:
4069 case AArch64::STLXRB:
4070 case AArch64::STLXRH:
4071 case AArch64::STLXRW:
4072 case AArch64::STLXRX: {
// Exclusive stores: the status register Rs must not overlap Rt, nor Rn
// (unless Rn is SP, which can never equal a GPR status register).
4073 unsigned Rs = Inst.getOperand(0).getReg();
4074 unsigned Rt = Inst.getOperand(1).getReg();
4075 unsigned Rn = Inst.getOperand(2).getReg();
4076 if (RI->isSubRegisterEq(Rt, Rs) ||
4077 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4078 return Error(Loc[0],
4079 "unpredictable STXR instruction, status is also a source");
4080 break;
4081 }
4082 case AArch64::STXPW:
4083 case AArch64::STXPX:
4084 case AArch64::STLXPW:
4085 case AArch64::STLXPX: {
4086 unsigned Rs = Inst.getOperand(0).getReg();
4087 unsigned Rt1 = Inst.getOperand(1).getReg();
4088 unsigned Rt2 = Inst.getOperand(2).getReg();
4089 unsigned Rn = Inst.getOperand(3).getReg();
4090 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4091 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4092 return Error(Loc[0],
4093 "unpredictable STXP instruction, status is also a source");
4094 break;
4095 }
4096 case AArch64::LDGV: {
4097 unsigned Rt = Inst.getOperand(0).getReg();
4098 unsigned Rn = Inst.getOperand(1).getReg();
4099 if (RI->isSubRegisterEq(Rt, Rn)) {
4100 return Error(Loc[0],
4101 "unpredictable LDGV instruction, writeback register is also "
4102 "the target register");
4103 }
// Last case of the switch: no break needed.
4104 }
4105 }
4106
4107
4108 // Now check immediate ranges. Separate from the above as there is overlap
4109 // in the instructions being checked and this keeps the nested conditionals
4110 // to a minimum.
4111 switch (Inst.getOpcode()) {
4112 case AArch64::ADDSWri:
4113 case AArch64::ADDSXri:
4114 case AArch64::ADDWri:
4115 case AArch64::ADDXri:
4116 case AArch64::SUBSWri:
4117 case AArch64::SUBSXri:
4118 case AArch64::SUBWri:
4119 case AArch64::SUBXri: {
4120 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4121 // some slight duplication here.
4122 if (Inst.getOperand(2).isExpr()) {
4123 const MCExpr *Expr = Inst.getOperand(2).getExpr();
4124 AArch64MCExpr::VariantKind ELFRefKind;
4125 MCSymbolRefExpr::VariantKind DarwinRefKind;
4126 int64_t Addend;
4127 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4128
4129 // Only allow these with ADDXri.
4130 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4131 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4132 Inst.getOpcode() == AArch64::ADDXri)
4133 return false;
4134
4135 // Only allow these with ADDXri/ADDWri
4136 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4137 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4138 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4139 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4140 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4141 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4142 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4143 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4144 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4145 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4146 (Inst.getOpcode() == AArch64::ADDXri ||
4147 Inst.getOpcode() == AArch64::ADDWri))
4148 return false;
4149
4150 // Don't allow symbol refs in the immediate field otherwise
4151 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4152 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4153 // 'cmp w0, 'borked')
4154 return Error(Loc.back(), "invalid immediate expression");
4155 }
4156 // We don't validate more complex expressions here
4157 }
4158 return false;
4159 }
4160 default:
4161 return false;
4162 }
4163}
4164
4165static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
4166 unsigned VariantID = 0);
4167
4168bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4169 uint64_t ErrorInfo,
4170 OperandVector &Operands) {
4171 switch (ErrCode) {
4172 case Match_InvalidTiedOperand: {
4173 RegConstraintEqualityTy EqTy =
4174 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4175 .getRegEqualityTy();
4176 switch (EqTy) {
4177 case RegConstraintEqualityTy::EqualsSubReg:
4178 return Error(Loc, "operand must be 64-bit form of destination register");
4179 case RegConstraintEqualityTy::EqualsSuperReg:
4180 return Error(Loc, "operand must be 32-bit form of destination register");
4181 case RegConstraintEqualityTy::EqualsReg:
4182 return Error(Loc, "operand must match destination register");
4183 }
4184 llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4184)
;
4185 }
4186 case Match_MissingFeature:
4187 return Error(Loc,
4188 "instruction requires a CPU feature not currently enabled");
4189 case Match_InvalidOperand:
4190 return Error(Loc, "invalid operand for instruction");
4191 case Match_InvalidSuffix:
4192 return Error(Loc, "invalid type suffix for instruction");
4193 case Match_InvalidCondCode:
4194 return Error(Loc, "expected AArch64 condition code");
4195 case Match_AddSubRegExtendSmall:
4196 return Error(Loc,
4197 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
4198 case Match_AddSubRegExtendLarge:
4199 return Error(Loc,
4200 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4201 case Match_AddSubSecondSource:
4202 return Error(Loc,
4203 "expected compatible register, symbol or integer in range [0, 4095]");
4204 case Match_LogicalSecondSource:
4205 return Error(Loc, "expected compatible register or logical immediate");
4206 case Match_InvalidMovImm32Shift:
4207 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4208 case Match_InvalidMovImm64Shift:
4209 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4210 case Match_AddSubRegShift32:
4211 return Error(Loc,
4212 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4213 case Match_AddSubRegShift64:
4214 return Error(Loc,
4215 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4216 case Match_InvalidFPImm:
4217 return Error(Loc,
4218 "expected compatible register or floating-point constant");
4219 case Match_InvalidMemoryIndexedSImm6:
4220 return Error(Loc, "index must be an integer in range [-32, 31].");
4221 case Match_InvalidMemoryIndexedSImm5:
4222 return Error(Loc, "index must be an integer in range [-16, 15].");
4223 case Match_InvalidMemoryIndexed1SImm4:
4224 return Error(Loc, "index must be an integer in range [-8, 7].");
4225 case Match_InvalidMemoryIndexed2SImm4:
4226 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4227 case Match_InvalidMemoryIndexed3SImm4:
4228 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4229 case Match_InvalidMemoryIndexed4SImm4:
4230 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4231 case Match_InvalidMemoryIndexed16SImm4:
4232 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4233 case Match_InvalidMemoryIndexed1SImm6:
4234 return Error(Loc, "index must be an integer in range [-32, 31].");
4235 case Match_InvalidMemoryIndexedSImm8:
4236 return Error(Loc, "index must be an integer in range [-128, 127].");
4237 case Match_InvalidMemoryIndexedSImm9:
4238 return Error(Loc, "index must be an integer in range [-256, 255].");
4239 case Match_InvalidMemoryIndexed16SImm9:
4240 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4241 case Match_InvalidMemoryIndexed8SImm10:
4242 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4243 case Match_InvalidMemoryIndexed4SImm7:
4244 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4245 case Match_InvalidMemoryIndexed8SImm7:
4246 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4247 case Match_InvalidMemoryIndexed16SImm7:
4248 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4249 case Match_InvalidMemoryIndexed8UImm5:
4250 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4251 case Match_InvalidMemoryIndexed4UImm5:
4252 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4253 case Match_InvalidMemoryIndexed2UImm5:
4254 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4255 case Match_InvalidMemoryIndexed8UImm6:
4256 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4257 case Match_InvalidMemoryIndexed16UImm6:
4258 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4259 case Match_InvalidMemoryIndexed4UImm6:
4260 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4261 case Match_InvalidMemoryIndexed2UImm6:
4262 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4263 case Match_InvalidMemoryIndexed1UImm6:
4264 return Error(Loc, "index must be in range [0, 63].");
4265 case Match_InvalidMemoryWExtend8:
4266 return Error(Loc,
4267 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4268 case Match_InvalidMemoryWExtend16:
4269 return Error(Loc,
4270 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4271 case Match_InvalidMemoryWExtend32:
4272 return Error(Loc,
4273 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4274 case Match_InvalidMemoryWExtend64:
4275 return Error(Loc,
4276 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4277 case Match_InvalidMemoryWExtend128:
4278 return Error(Loc,
4279 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4280 case Match_InvalidMemoryXExtend8:
4281 return Error(Loc,
4282 "expected 'lsl' or 'sxtx' with optional shift of #0");
4283 case Match_InvalidMemoryXExtend16:
4284 return Error(Loc,
4285 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4286 case Match_InvalidMemoryXExtend32:
4287 return Error(Loc,
4288 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4289 case Match_InvalidMemoryXExtend64:
4290 return Error(Loc,
4291 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4292 case Match_InvalidMemoryXExtend128:
4293 return Error(Loc,
4294 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4295 case Match_InvalidMemoryIndexed1:
4296 return Error(Loc, "index must be an integer in range [0, 4095].");
4297 case Match_InvalidMemoryIndexed2:
4298 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4299 case Match_InvalidMemoryIndexed4:
4300 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4301 case Match_InvalidMemoryIndexed8:
4302 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4303 case Match_InvalidMemoryIndexed16:
4304 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4305 case Match_InvalidImm0_1:
4306 return Error(Loc, "immediate must be an integer in range [0, 1].");
4307 case Match_InvalidImm0_7:
4308 return Error(Loc, "immediate must be an integer in range [0, 7].");
4309 case Match_InvalidImm0_15:
4310 return Error(Loc, "immediate must be an integer in range [0, 15].");
4311 case Match_InvalidImm0_31:
4312 return Error(Loc, "immediate must be an integer in range [0, 31].");
4313 case Match_InvalidImm0_63:
4314 return Error(Loc, "immediate must be an integer in range [0, 63].");
4315 case Match_InvalidImm0_127:
4316 return Error(Loc, "immediate must be an integer in range [0, 127].");
4317 case Match_InvalidImm0_255:
4318 return Error(Loc, "immediate must be an integer in range [0, 255].");
4319 case Match_InvalidImm0_65535:
4320 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4321 case Match_InvalidImm1_8:
4322 return Error(Loc, "immediate must be an integer in range [1, 8].");
4323 case Match_InvalidImm1_16:
4324 return Error(Loc, "immediate must be an integer in range [1, 16].");
4325 case Match_InvalidImm1_32:
4326 return Error(Loc, "immediate must be an integer in range [1, 32].");
4327 case Match_InvalidImm1_64:
4328 return Error(Loc, "immediate must be an integer in range [1, 64].");
4329 case Match_InvalidSVEAddSubImm8:
4330 return Error(Loc, "immediate must be an integer in range [0, 255]"
4331 " with a shift amount of 0");
4332 case Match_InvalidSVEAddSubImm16:
4333 case Match_InvalidSVEAddSubImm32:
4334 case Match_InvalidSVEAddSubImm64:
4335 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4336 "multiple of 256 in range [256, 65280]");
4337 case Match_InvalidSVECpyImm8:
4338 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4339 " with a shift amount of 0");
4340 case Match_InvalidSVECpyImm16:
4341 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4342 "multiple of 256 in range [-32768, 65280]");
4343 case Match_InvalidSVECpyImm32:
4344 case Match_InvalidSVECpyImm64:
4345 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4346 "multiple of 256 in range [-32768, 32512]");
4347 case Match_InvalidIndexRange1_1:
4348 return Error(Loc, "expected lane specifier '[1]'");
4349 case Match_InvalidIndexRange0_15:
4350 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4351 case Match_InvalidIndexRange0_7:
4352 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4353 case Match_InvalidIndexRange0_3:
4354 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4355 case Match_InvalidIndexRange0_1:
4356 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4357 case Match_InvalidSVEIndexRange0_63:
4358 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4359 case Match_InvalidSVEIndexRange0_31:
4360 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4361 case Match_InvalidSVEIndexRange0_15:
4362 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4363 case Match_InvalidSVEIndexRange0_7:
4364 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4365 case Match_InvalidSVEIndexRange0_3:
4366 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4367 case Match_InvalidLabel:
4368 return Error(Loc, "expected label or encodable integer pc offset");
4369 case Match_MRS:
4370 return Error(Loc, "expected readable system register");
4371 case Match_MSR:
4372 return Error(Loc, "expected writable system register or pstate");
4373 case Match_InvalidComplexRotationEven:
4374 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4375 case Match_InvalidComplexRotationOdd:
4376 return Error(Loc, "complex rotation must be 90 or 270.");
4377 case Match_MnemonicFail: {
4378 std::string Suggestion = AArch64MnemonicSpellCheck(
4379 ((AArch64Operand &)*Operands[0]).getToken(),
4380 ComputeAvailableFeatures(STI->getFeatureBits()));
4381 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4382 }
4383 case Match_InvalidGPR64shifted8:
4384 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4385 case Match_InvalidGPR64shifted16:
4386 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4387 case Match_InvalidGPR64shifted32:
4388 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4389 case Match_InvalidGPR64shifted64:
4390 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4391 case Match_InvalidGPR64NoXZRshifted8:
4392 return Error(Loc, "register must be x0..x30 without shift");
4393 case Match_InvalidGPR64NoXZRshifted16:
4394 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4395 case Match_InvalidGPR64NoXZRshifted32:
4396 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4397 case Match_InvalidGPR64NoXZRshifted64:
4398 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4399 case Match_InvalidZPR32UXTW8:
4400 case Match_InvalidZPR32SXTW8:
4401 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4402 case Match_InvalidZPR32UXTW16:
4403 case Match_InvalidZPR32SXTW16:
4404 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4405 case Match_InvalidZPR32UXTW32:
4406 case Match_InvalidZPR32SXTW32:
4407 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4408 case Match_InvalidZPR32UXTW64:
4409 case Match_InvalidZPR32SXTW64:
4410 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4411 case Match_InvalidZPR64UXTW8:
4412 case Match_InvalidZPR64SXTW8:
4413 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4414 case Match_InvalidZPR64UXTW16:
4415 case Match_InvalidZPR64SXTW16:
4416 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4417 case Match_InvalidZPR64UXTW32:
4418 case Match_InvalidZPR64SXTW32:
4419 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4420 case Match_InvalidZPR64UXTW64:
4421 case Match_InvalidZPR64SXTW64:
4422 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4423 case Match_InvalidZPR32LSL8:
4424 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4425 case Match_InvalidZPR32LSL16:
4426 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4427 case Match_InvalidZPR32LSL32:
4428 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4429 case Match_InvalidZPR32LSL64:
4430 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4431 case Match_InvalidZPR64LSL8:
4432 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4433 case Match_InvalidZPR64LSL16:
4434 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4435 case Match_InvalidZPR64LSL32:
4436 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4437 case Match_InvalidZPR64LSL64:
4438 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4439 case Match_InvalidZPR0:
4440 return Error(Loc, "expected register without element width sufix");
4441 case Match_InvalidZPR8:
4442 case Match_InvalidZPR16:
4443 case Match_InvalidZPR32:
4444 case Match_InvalidZPR64:
4445 case Match_InvalidZPR128:
4446 return Error(Loc, "invalid element width");
4447 case Match_InvalidZPR_3b8:
4448 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4449 case Match_InvalidZPR_3b16:
4450 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4451 case Match_InvalidZPR_3b32:
4452 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4453 case Match_InvalidZPR_4b16:
4454 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4455 case Match_InvalidZPR_4b32:
4456 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4457 case Match_InvalidZPR_4b64:
4458 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4459 case Match_InvalidSVEPattern:
4460 return Error(Loc, "invalid predicate pattern");
4461 case Match_InvalidSVEPredicateAnyReg:
4462 case Match_InvalidSVEPredicateBReg:
4463 case Match_InvalidSVEPredicateHReg:
4464 case Match_InvalidSVEPredicateSReg:
4465 case Match_InvalidSVEPredicateDReg:
4466 return Error(Loc, "invalid predicate register.");
4467 case Match_InvalidSVEPredicate3bAnyReg:
4468 case Match_InvalidSVEPredicate3bBReg:
4469 case Match_InvalidSVEPredicate3bHReg:
4470 case Match_InvalidSVEPredicate3bSReg:
4471 case Match_InvalidSVEPredicate3bDReg:
4472 return Error(Loc, "restricted predicate has range [0, 7].");
4473 case Match_InvalidSVEExactFPImmOperandHalfOne:
4474 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4475 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4476 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4477 case Match_InvalidSVEExactFPImmOperandZeroOne:
4478 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4479 default:
4480 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4480)
;
4481 }
4482}
4483
4484static const char *getSubtargetFeatureName(uint64_t Val);
4485
4486bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4487 OperandVector &Operands,
4488 MCStreamer &Out,
4489 uint64_t &ErrorInfo,
4490 bool MatchingInlineAsm) {
4491 assert(!Operands.empty() && "Unexpect empty operand list!")((!Operands.empty() && "Unexpect empty operand list!"
) ? static_cast<void> (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4491, __PRETTY_FUNCTION__))
;
4492 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4493 assert(Op.isToken() && "Leading operand should always be a mnemonic!")((Op.isToken() && "Leading operand should always be a mnemonic!"
) ? static_cast<void> (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4493, __PRETTY_FUNCTION__))
;
4494
4495 StringRef Tok = Op.getToken();
4496 unsigned NumOperands = Operands.size();
4497
4498 if (NumOperands == 4 && Tok == "lsl") {
4499 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4500 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4501 if (Op2.isScalarReg() && Op3.isImm()) {
4502 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4503 if (Op3CE) {
4504 uint64_t Op3Val = Op3CE->getValue();
4505 uint64_t NewOp3Val = 0;
4506 uint64_t NewOp4Val = 0;
4507 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4508 Op2.getReg())) {
4509 NewOp3Val = (32 - Op3Val) & 0x1f;
4510 NewOp4Val = 31 - Op3Val;
4511 } else {
4512 NewOp3Val = (64 - Op3Val) & 0x3f;
4513 NewOp4Val = 63 - Op3Val;
4514 }
4515
4516 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4517 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4518
4519 Operands[0] = AArch64Operand::CreateToken(
4520 "ubfm", false, Op.getStartLoc(), getContext());
4521 Operands.push_back(AArch64Operand::CreateImm(
4522 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4523 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4524 Op3.getEndLoc(), getContext());
4525 }
4526 }
4527 } else if (NumOperands == 4 && Tok == "bfc") {
4528 // FIXME: Horrible hack to handle BFC->BFM alias.
4529 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4530 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4531 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4532
4533 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4534 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4535 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4536
4537 if (LSBCE && WidthCE) {
4538 uint64_t LSB = LSBCE->getValue();
4539 uint64_t Width = WidthCE->getValue();
4540
4541 uint64_t RegWidth = 0;
4542 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4543 Op1.getReg()))
4544 RegWidth = 64;
4545 else
4546 RegWidth = 32;
4547
4548 if (LSB >= RegWidth)
4549 return Error(LSBOp.getStartLoc(),
4550 "expected integer in range [0, 31]");
4551 if (Width < 1 || Width > RegWidth)
4552 return Error(WidthOp.getStartLoc(),
4553 "expected integer in range [1, 32]");
4554
4555 uint64_t ImmR = 0;
4556 if (RegWidth == 32)
4557 ImmR = (32 - LSB) & 0x1f;
4558 else
4559 ImmR = (64 - LSB) & 0x3f;
4560
4561 uint64_t ImmS = Width - 1;
4562
4563 if (ImmR != 0 && ImmS >= ImmR)
4564 return Error(WidthOp.getStartLoc(),
4565 "requested insert overflows register");
4566
4567 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4568 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4569 Operands[0] = AArch64Operand::CreateToken(
4570 "bfm", false, Op.getStartLoc(), getContext());
4571 Operands[2] = AArch64Operand::CreateReg(
4572 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4573 SMLoc(), SMLoc(), getContext());
4574 Operands[3] = AArch64Operand::CreateImm(
4575 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4576 Operands.emplace_back(
4577 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4578 WidthOp.getEndLoc(), getContext()));
4579 }
4580 }
4581 } else if (NumOperands == 5) {
4582 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4583 // UBFIZ -> UBFM aliases.
4584 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4585 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4586 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4587 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4588
4589 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4590 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4591 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4592
4593 if (Op3CE && Op4CE) {
4594 uint64_t Op3Val = Op3CE->getValue();
4595 uint64_t Op4Val = Op4CE->getValue();
4596
4597 uint64_t RegWidth = 0;
4598 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4599 Op1.getReg()))
4600 RegWidth = 64;
4601 else
4602 RegWidth = 32;
4603
4604 if (Op3Val >= RegWidth)
4605 return Error(Op3.getStartLoc(),
4606 "expected integer in range [0, 31]");
4607 if (Op4Val < 1 || Op4Val > RegWidth)
4608 return Error(Op4.getStartLoc(),
4609 "expected integer in range [1, 32]");
4610
4611 uint64_t NewOp3Val = 0;
4612 if (RegWidth == 32)
4613 NewOp3Val = (32 - Op3Val) & 0x1f;
4614 else
4615 NewOp3Val = (64 - Op3Val) & 0x3f;
4616
4617 uint64_t NewOp4Val = Op4Val - 1;
4618
4619 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4620 return Error(Op4.getStartLoc(),
4621 "requested insert overflows register");
4622
4623 const MCExpr *NewOp3 =
4624 MCConstantExpr::create(NewOp3Val, getContext());
4625 const MCExpr *NewOp4 =
4626 MCConstantExpr::create(NewOp4Val, getContext());
4627 Operands[3] = AArch64Operand::CreateImm(
4628 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4629 Operands[4] = AArch64Operand::CreateImm(
4630 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4631 if (Tok == "bfi")
4632 Operands[0] = AArch64Operand::CreateToken(
4633 "bfm", false, Op.getStartLoc(), getContext());
4634 else if (Tok == "sbfiz")
4635 Operands[0] = AArch64Operand::CreateToken(
4636 "sbfm", false, Op.getStartLoc(), getContext());
4637 else if (Tok == "ubfiz")
4638 Operands[0] = AArch64Operand::CreateToken(
4639 "ubfm", false, Op.getStartLoc(), getContext());
4640 else
4641 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4641)
;
4642 }
4643 }
4644
4645 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4646 // UBFX -> UBFM aliases.
4647 } else if (NumOperands == 5 &&
4648 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4649 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4650 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4651 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4652
4653 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4654 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4655 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4656
4657 if (Op3CE && Op4CE) {
4658 uint64_t Op3Val = Op3CE->getValue();
4659 uint64_t Op4Val = Op4CE->getValue();
4660
4661 uint64_t RegWidth = 0;
4662 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4663 Op1.getReg()))
4664 RegWidth = 64;
4665 else
4666 RegWidth = 32;
4667
4668 if (Op3Val >= RegWidth)
4669 return Error(Op3.getStartLoc(),
4670 "expected integer in range [0, 31]");
4671 if (Op4Val < 1 || Op4Val > RegWidth)
4672 return Error(Op4.getStartLoc(),
4673 "expected integer in range [1, 32]");
4674
4675 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4676
4677 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4678 return Error(Op4.getStartLoc(),
4679 "requested extract overflows register");
4680
4681 const MCExpr *NewOp4 =
4682 MCConstantExpr::create(NewOp4Val, getContext());
4683 Operands[4] = AArch64Operand::CreateImm(
4684 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4685 if (Tok == "bfxil")
4686 Operands[0] = AArch64Operand::CreateToken(
4687 "bfm", false, Op.getStartLoc(), getContext());
4688 else if (Tok == "sbfx")
4689 Operands[0] = AArch64Operand::CreateToken(
4690 "sbfm", false, Op.getStartLoc(), getContext());
4691 else if (Tok == "ubfx")
4692 Operands[0] = AArch64Operand::CreateToken(
4693 "ubfm", false, Op.getStartLoc(), getContext());
4694 else
4695 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4695)
;
4696 }
4697 }
4698 }
4699 }
4700
4701 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4702 // instruction for FP registers correctly in some rare circumstances. Convert
4703 // it to a safe instruction and warn (because silently changing someone's
4704 // assembly is rude).
4705 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4706 NumOperands == 4 && Tok == "movi") {
4707 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4708 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4709 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4710 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4711 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4712 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4713 if (Suffix.lower() == ".2d" &&
4714 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4715 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4716 " correctly on this CPU, converting to equivalent movi.16b");
4717 // Switch the suffix to .16b.
4718 unsigned Idx = Op1.isToken() ? 1 : 2;
4719 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4720 getContext());
4721 }
4722 }
4723 }
4724
4725 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4726 // InstAlias can't quite handle this since the reg classes aren't
4727 // subclasses.
4728 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4729 // The source register can be Wn here, but the matcher expects a
4730 // GPR64. Twiddle it here if necessary.
4731 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4732 if (Op.isScalarReg()) {
4733 unsigned Reg = getXRegFromWReg(Op.getReg());
4734 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4735 Op.getStartLoc(), Op.getEndLoc(),
4736 getContext());
4737 }
4738 }
4739 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4740 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4741 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4742 if (Op.isScalarReg() &&
4743 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4744 Op.getReg())) {
4745 // The source register can be Wn here, but the matcher expects a
4746 // GPR64. Twiddle it here if necessary.
4747 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4748 if (Op.isScalarReg()) {
4749 unsigned Reg = getXRegFromWReg(Op.getReg());
4750 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4751 Op.getStartLoc(),
4752 Op.getEndLoc(), getContext());
4753 }
4754 }
4755 }
4756 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4757 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4758 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4759 if (Op.isScalarReg() &&
4760 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4761 Op.getReg())) {
4762 // The source register can be Wn here, but the matcher expects a
4763 // GPR32. Twiddle it here if necessary.
4764 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4765 if (Op.isScalarReg()) {
4766 unsigned Reg = getWRegFromXReg(Op.getReg());
4767 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4768 Op.getStartLoc(),
4769 Op.getEndLoc(), getContext());
4770 }
4771 }
4772 }
4773
4774 MCInst Inst;
4775 // First try to match against the secondary set of tables containing the
4776 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4777 unsigned MatchResult =
4778 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4779
4780 // If that fails, try against the alternate table containing long-form NEON:
4781 // "fadd v0.2s, v1.2s, v2.2s"
4782 if (MatchResult != Match_Success) {
4783 // But first, save the short-form match result: we can use it in case the
4784 // long-form match also fails.
4785 auto ShortFormNEONErrorInfo = ErrorInfo;
4786 auto ShortFormNEONMatchResult = MatchResult;
4787
4788 MatchResult =
4789 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4790
4791 // Now, both matches failed, and the long-form match failed on the mnemonic
4792 // suffix token operand. The short-form match failure is probably more
4793 // relevant: use it instead.
4794 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4795 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4796 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4797 MatchResult = ShortFormNEONMatchResult;
4798 ErrorInfo = ShortFormNEONErrorInfo;
4799 }
4800 }
4801
4802 switch (MatchResult) {
4803 case Match_Success: {
4804 // Perform range checking and other semantic validations
4805 SmallVector<SMLoc, 8> OperandLocs;
4806 NumOperands = Operands.size();
4807 for (unsigned i = 1; i < NumOperands; ++i)
4808 OperandLocs.push_back(Operands[i]->getStartLoc());
4809 if (validateInstruction(Inst, IDLoc, OperandLocs))
4810 return true;
4811
4812 Inst.setLoc(IDLoc);
4813 Out.EmitInstruction(Inst, getSTI());
4814 return false;
4815 }
4816 case Match_MissingFeature: {
4817 assert(ErrorInfo && "Unknown missing feature!")((ErrorInfo && "Unknown missing feature!") ? static_cast
<void> (0) : __assert_fail ("ErrorInfo && \"Unknown missing feature!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 4817, __PRETTY_FUNCTION__))
;
4818 // Special case the error message for the very common case where only
4819 // a single subtarget feature is missing (neon, e.g.).
4820 std::string Msg = "instruction requires:";
4821 uint64_t Mask = 1;
4822 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4823 if (ErrorInfo & Mask) {
4824 Msg += " ";
4825 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4826 }
4827 Mask <<= 1;
4828 }
4829 return Error(IDLoc, Msg);
4830 }
4831 case Match_MnemonicFail:
4832 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4833 case Match_InvalidOperand: {
4834 SMLoc ErrorLoc = IDLoc;
4835
4836 if (ErrorInfo != ~0ULL) {
4837 if (ErrorInfo >= Operands.size())
4838 return Error(IDLoc, "too few operands for instruction",
4839 SMRange(IDLoc, getTok().getLoc()));
4840
4841 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4842 if (ErrorLoc == SMLoc())
4843 ErrorLoc = IDLoc;
4844 }
4845 // If the match failed on a suffix token operand, tweak the diagnostic
4846 // accordingly.
4847 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4848 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4849 MatchResult = Match_InvalidSuffix;
4850
4851 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4852 }
4853 case Match_InvalidTiedOperand:
4854 case Match_InvalidMemoryIndexed1:
4855 case Match_InvalidMemoryIndexed2:
4856 case Match_InvalidMemoryIndexed4:
4857 case Match_InvalidMemoryIndexed8:
4858 case Match_InvalidMemoryIndexed16:
4859 case Match_InvalidCondCode:
4860 case Match_AddSubRegExtendSmall:
4861 case Match_AddSubRegExtendLarge:
4862 case Match_AddSubSecondSource:
4863 case Match_LogicalSecondSource:
4864 case Match_AddSubRegShift32:
4865 case Match_AddSubRegShift64:
4866 case Match_InvalidMovImm32Shift:
4867 case Match_InvalidMovImm64Shift:
4868 case Match_InvalidFPImm:
4869 case Match_InvalidMemoryWExtend8:
4870 case Match_InvalidMemoryWExtend16:
4871 case Match_InvalidMemoryWExtend32:
4872 case Match_InvalidMemoryWExtend64:
4873 case Match_InvalidMemoryWExtend128:
4874 case Match_InvalidMemoryXExtend8:
4875 case Match_InvalidMemoryXExtend16:
4876 case Match_InvalidMemoryXExtend32:
4877 case Match_InvalidMemoryXExtend64:
4878 case Match_InvalidMemoryXExtend128:
4879 case Match_InvalidMemoryIndexed1SImm4:
4880 case Match_InvalidMemoryIndexed2SImm4:
4881 case Match_InvalidMemoryIndexed3SImm4:
4882 case Match_InvalidMemoryIndexed4SImm4:
4883 case Match_InvalidMemoryIndexed1SImm6:
4884 case Match_InvalidMemoryIndexed16SImm4:
4885 case Match_InvalidMemoryIndexed4SImm7:
4886 case Match_InvalidMemoryIndexed8SImm7:
4887 case Match_InvalidMemoryIndexed16SImm7:
4888 case Match_InvalidMemoryIndexed8UImm5:
4889 case Match_InvalidMemoryIndexed4UImm5:
4890 case Match_InvalidMemoryIndexed2UImm5:
4891 case Match_InvalidMemoryIndexed1UImm6:
4892 case Match_InvalidMemoryIndexed2UImm6:
4893 case Match_InvalidMemoryIndexed4UImm6:
4894 case Match_InvalidMemoryIndexed8UImm6:
4895 case Match_InvalidMemoryIndexed16UImm6:
4896 case Match_InvalidMemoryIndexedSImm6:
4897 case Match_InvalidMemoryIndexedSImm5:
4898 case Match_InvalidMemoryIndexedSImm8:
4899 case Match_InvalidMemoryIndexedSImm9:
4900 case Match_InvalidMemoryIndexed16SImm9:
4901 case Match_InvalidMemoryIndexed8SImm10:
4902 case Match_InvalidImm0_1:
4903 case Match_InvalidImm0_7:
4904 case Match_InvalidImm0_15:
4905 case Match_InvalidImm0_31:
4906 case Match_InvalidImm0_63:
4907 case Match_InvalidImm0_127:
4908 case Match_InvalidImm0_255:
4909 case Match_InvalidImm0_65535:
4910 case Match_InvalidImm1_8:
4911 case Match_InvalidImm1_16:
4912 case Match_InvalidImm1_32:
4913 case Match_InvalidImm1_64:
4914 case Match_InvalidSVEAddSubImm8:
4915 case Match_InvalidSVEAddSubImm16:
4916 case Match_InvalidSVEAddSubImm32:
4917 case Match_InvalidSVEAddSubImm64:
4918 case Match_InvalidSVECpyImm8:
4919 case Match_InvalidSVECpyImm16:
4920 case Match_InvalidSVECpyImm32:
4921 case Match_InvalidSVECpyImm64:
4922 case Match_InvalidIndexRange1_1:
4923 case Match_InvalidIndexRange0_15:
4924 case Match_InvalidIndexRange0_7:
4925 case Match_InvalidIndexRange0_3:
4926 case Match_InvalidIndexRange0_1:
4927 case Match_InvalidSVEIndexRange0_63:
4928 case Match_InvalidSVEIndexRange0_31:
4929 case Match_InvalidSVEIndexRange0_15:
4930 case Match_InvalidSVEIndexRange0_7:
4931 case Match_InvalidSVEIndexRange0_3:
4932 case Match_InvalidLabel:
4933 case Match_InvalidComplexRotationEven:
4934 case Match_InvalidComplexRotationOdd:
4935 case Match_InvalidGPR64shifted8:
4936 case Match_InvalidGPR64shifted16:
4937 case Match_InvalidGPR64shifted32:
4938 case Match_InvalidGPR64shifted64:
4939 case Match_InvalidGPR64NoXZRshifted8:
4940 case Match_InvalidGPR64NoXZRshifted16:
4941 case Match_InvalidGPR64NoXZRshifted32:
4942 case Match_InvalidGPR64NoXZRshifted64:
4943 case Match_InvalidZPR32UXTW8:
4944 case Match_InvalidZPR32UXTW16:
4945 case Match_InvalidZPR32UXTW32:
4946 case Match_InvalidZPR32UXTW64:
4947 case Match_InvalidZPR32SXTW8:
4948 case Match_InvalidZPR32SXTW16:
4949 case Match_InvalidZPR32SXTW32:
4950 case Match_InvalidZPR32SXTW64:
4951 case Match_InvalidZPR64UXTW8:
4952 case Match_InvalidZPR64SXTW8:
4953 case Match_InvalidZPR64UXTW16:
4954 case Match_InvalidZPR64SXTW16:
4955 case Match_InvalidZPR64UXTW32:
4956 case Match_InvalidZPR64SXTW32:
4957 case Match_InvalidZPR64UXTW64:
4958 case Match_InvalidZPR64SXTW64:
4959 case Match_InvalidZPR32LSL8:
4960 case Match_InvalidZPR32LSL16:
4961 case Match_InvalidZPR32LSL32:
4962 case Match_InvalidZPR32LSL64:
4963 case Match_InvalidZPR64LSL8:
4964 case Match_InvalidZPR64LSL16:
4965 case Match_InvalidZPR64LSL32:
4966 case Match_InvalidZPR64LSL64:
4967 case Match_InvalidZPR0:
4968 case Match_InvalidZPR8:
4969 case Match_InvalidZPR16:
4970 case Match_InvalidZPR32:
4971 case Match_InvalidZPR64:
4972 case Match_InvalidZPR128:
4973 case Match_InvalidZPR_3b8:
4974 case Match_InvalidZPR_3b16:
4975 case Match_InvalidZPR_3b32:
4976 case Match_InvalidZPR_4b16:
4977 case Match_InvalidZPR_4b32:
4978 case Match_InvalidZPR_4b64:
4979 case Match_InvalidSVEPredicateAnyReg:
4980 case Match_InvalidSVEPattern:
4981 case Match_InvalidSVEPredicateBReg:
4982 case Match_InvalidSVEPredicateHReg:
4983 case Match_InvalidSVEPredicateSReg:
4984 case Match_InvalidSVEPredicateDReg:
4985 case Match_InvalidSVEPredicate3bAnyReg:
4986 case Match_InvalidSVEPredicate3bBReg:
4987 case Match_InvalidSVEPredicate3bHReg:
4988 case Match_InvalidSVEPredicate3bSReg:
4989 case Match_InvalidSVEPredicate3bDReg:
4990 case Match_InvalidSVEExactFPImmOperandHalfOne:
4991 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4992 case Match_InvalidSVEExactFPImmOperandZeroOne:
4993 case Match_MSR:
4994 case Match_MRS: {
4995 if (ErrorInfo >= Operands.size())
4996 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4997 // Any time we get here, there's nothing fancy to do. Just get the
4998 // operand SMLoc and display the diagnostic.
4999 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5000 if (ErrorLoc == SMLoc())
5001 ErrorLoc = IDLoc;
5002 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5003 }
5004 }
5005
5006 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5006)
;
5007}
5008
5009/// ParseDirective parses the arm specific directives
5010bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5011 const MCObjectFileInfo::Environment Format =
5012 getContext().getObjectFileInfo()->getObjectFileType();
5013 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5014
5015 StringRef IDVal = DirectiveID.getIdentifier();
5016 SMLoc Loc = DirectiveID.getLoc();
5017 if (IDVal == ".arch")
5018 parseDirectiveArch(Loc);
5019 else if (IDVal == ".cpu")
5020 parseDirectiveCPU(Loc);
5021 else if (IDVal == ".tlsdesccall")
5022 parseDirectiveTLSDescCall(Loc);
5023 else if (IDVal == ".ltorg" || IDVal == ".pool")
5024 parseDirectiveLtorg(Loc);
5025 else if (IDVal == ".unreq")
5026 parseDirectiveUnreq(Loc);
5027 else if (IDVal == ".inst")
5028 parseDirectiveInst(Loc);
5029 else if (IsMachO) {
5030 if (IDVal == MCLOHDirectiveName())
5031 parseDirectiveLOH(IDVal, Loc);
5032 else
5033 return true;
5034 } else
5035 return true;
5036 return false;
5037}
5038
5039static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5040 SmallVector<StringRef, 4> &RequestedExtensions) {
5041 const bool NoCrypto =
5042 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5043 "nocrypto") != std::end(RequestedExtensions));
5044 const bool Crypto =
5045 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5046 "crypto") != std::end(RequestedExtensions));
5047
5048 if (!NoCrypto && Crypto) {
5049 switch (ArchKind) {
5050 default:
5051 // Map 'generic' (and others) to sha2 and aes, because
5052 // that was the traditional meaning of crypto.
5053 case AArch64::ArchKind::ARMV8_1A:
5054 case AArch64::ArchKind::ARMV8_2A:
5055 case AArch64::ArchKind::ARMV8_3A:
5056 RequestedExtensions.push_back("sha2");
5057 RequestedExtensions.push_back("aes");
5058 break;
5059 case AArch64::ArchKind::ARMV8_4A:
5060 case AArch64::ArchKind::ARMV8_5A:
5061 RequestedExtensions.push_back("sm4");
5062 RequestedExtensions.push_back("sha3");
5063 RequestedExtensions.push_back("sha2");
5064 RequestedExtensions.push_back("aes");
5065 break;
5066 }
5067 } else if (NoCrypto) {
5068 switch (ArchKind) {
5069 default:
5070 // Map 'generic' (and others) to sha2 and aes, because
5071 // that was the traditional meaning of crypto.
5072 case AArch64::ArchKind::ARMV8_1A:
5073 case AArch64::ArchKind::ARMV8_2A:
5074 case AArch64::ArchKind::ARMV8_3A:
5075 RequestedExtensions.push_back("nosha2");
5076 RequestedExtensions.push_back("noaes");
5077 break;
5078 case AArch64::ArchKind::ARMV8_4A:
5079 case AArch64::ArchKind::ARMV8_5A:
5080 RequestedExtensions.push_back("nosm4");
5081 RequestedExtensions.push_back("nosha3");
5082 RequestedExtensions.push_back("nosha2");
5083 RequestedExtensions.push_back("noaes");
5084 break;
5085 }
5086 }
5087}
5088
5089/// parseDirectiveArch
5090/// ::= .arch token
5091bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5092 SMLoc ArchLoc = getLoc();
5093
5094 StringRef Arch, ExtensionString;
5095 std::tie(Arch, ExtensionString) =
5096 getParser().parseStringToEndOfStatement().trim().split('+');
5097
5098 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5099 if (ID == AArch64::ArchKind::INVALID)
5100 return Error(ArchLoc, "unknown arch name");
5101
5102 if (parseToken(AsmToken::EndOfStatement))
5103 return true;
5104
5105 // Get the architecture and extension features.
5106 std::vector<StringRef> AArch64Features;
5107 AArch64::getArchFeatures(ID, AArch64Features);
5108 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5109 AArch64Features);
5110
5111 MCSubtargetInfo &STI = copySTI();
5112 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5113 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5114
5115 SmallVector<StringRef, 4> RequestedExtensions;
5116 if (!ExtensionString.empty())
5117 ExtensionString.split(RequestedExtensions, '+');
5118
5119 ExpandCryptoAEK(ID, RequestedExtensions);
5120
5121 FeatureBitset Features = STI.getFeatureBits();
5122 for (auto Name : RequestedExtensions) {
5123 bool EnableFeature = true;
5124
5125 if (Name.startswith_lower("no")) {
5126 EnableFeature = false;
5127 Name = Name.substr(2);
5128 }
5129
5130 for (const auto &Extension : ExtensionMap) {
5131 if (Extension.Name != Name)
5132 continue;
5133
5134 if (Extension.Features.none())
5135 report_fatal_error("unsupported architectural extension: " + Name);
5136
5137 FeatureBitset ToggleFeatures = EnableFeature
5138 ? (~Features & Extension.Features)
5139 : ( Features & Extension.Features);
5140 uint64_t Features =
5141 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5142 setAvailableFeatures(Features);
5143 break;
5144 }
5145 }
5146 return false;
5147}
5148
5149static SMLoc incrementLoc(SMLoc L, int Offset) {
5150 return SMLoc::getFromPointer(L.getPointer() + Offset);
5151}
5152
5153/// parseDirectiveCPU
5154/// ::= .cpu id
5155bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5156 SMLoc CurLoc = getLoc();
5157
5158 StringRef CPU, ExtensionString;
5159 std::tie(CPU, ExtensionString) =
5160 getParser().parseStringToEndOfStatement().trim().split('+');
5161
5162 if (parseToken(AsmToken::EndOfStatement))
5163 return true;
5164
5165 SmallVector<StringRef, 4> RequestedExtensions;
5166 if (!ExtensionString.empty())
5167 ExtensionString.split(RequestedExtensions, '+');
5168
5169 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5170 // once that is tablegen'ed
5171 if (!getSTI().isCPUStringValid(CPU)) {
5172 Error(CurLoc, "unknown CPU name");
5173 return false;
5174 }
5175
5176 MCSubtargetInfo &STI = copySTI();
5177 STI.setDefaultFeatures(CPU, "");
5178 CurLoc = incrementLoc(CurLoc, CPU.size());
5179
5180 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5181
5182 FeatureBitset Features = STI.getFeatureBits();
5183 for (auto Name : RequestedExtensions) {
5184 // Advance source location past '+'.
5185 CurLoc = incrementLoc(CurLoc, 1);
5186
5187 bool EnableFeature = true;
5188
5189 if (Name.startswith_lower("no")) {
5190 EnableFeature = false;
5191 Name = Name.substr(2);
5192 }
5193
5194 bool FoundExtension = false;
5195 for (const auto &Extension : ExtensionMap) {
5196 if (Extension.Name != Name)
5197 continue;
5198
5199 if (Extension.Features.none())
5200 report_fatal_error("unsupported architectural extension: " + Name);
5201
5202 FeatureBitset ToggleFeatures = EnableFeature
5203 ? (~Features & Extension.Features)
5204 : ( Features & Extension.Features);
5205 uint64_t Features =
5206 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5207 setAvailableFeatures(Features);
5208 FoundExtension = true;
5209
5210 break;
5211 }
5212
5213 if (!FoundExtension)
5214 Error(CurLoc, "unsupported architectural extension");
5215
5216 CurLoc = incrementLoc(CurLoc, Name.size());
5217 }
5218 return false;
5219}
5220
5221/// parseDirectiveInst
5222/// ::= .inst opcode [, ...]
5223bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5224 if (getLexer().is(AsmToken::EndOfStatement))
5225 return Error(Loc, "expected expression following '.inst' directive");
5226
5227 auto parseOp = [&]() -> bool {
5228 SMLoc L = getLoc();
5229 const MCExpr *Expr;
5230 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5231 return true;
5232 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5233 if (check(!Value, L, "expected constant expression"))
5234 return true;
5235 getTargetStreamer().emitInst(Value->getValue());
5236 return false;
5237 };
5238
5239 if (parseMany(parseOp))
5240 return addErrorSuffix(" in '.inst' directive");
5241 return false;
5242}
5243
5244// parseDirectiveTLSDescCall:
5245// ::= .tlsdesccall symbol
5246bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5247 StringRef Name;
5248 if (check(getParser().parseIdentifier(Name), L,
5249 "expected symbol after directive") ||
5250 parseToken(AsmToken::EndOfStatement))
5251 return true;
5252
5253 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5254 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5255 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5256
5257 MCInst Inst;
5258 Inst.setOpcode(AArch64::TLSDESCCALL);
5259 Inst.addOperand(MCOperand::createExpr(Expr));
5260
5261 getParser().getStreamer().EmitInstruction(Inst, getSTI());
5262 return false;
5263}
5264
5265/// ::= .loh <lohName | lohId> label1, ..., labelN
5266/// The number of arguments depends on the loh identifier.
5267bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5268 MCLOHType Kind;
5269 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5270 if (getParser().getTok().isNot(AsmToken::Integer))
5271 return TokError("expected an identifier or a number in directive");
5272 // We successfully get a numeric value for the identifier.
5273 // Check if it is valid.
5274 int64_t Id = getParser().getTok().getIntVal();
5275 if (Id <= -1U && !isValidMCLOHType(Id))
5276 return TokError("invalid numeric identifier in directive");
5277 Kind = (MCLOHType)Id;
5278 } else {
5279 StringRef Name = getTok().getIdentifier();
5280 // We successfully parse an identifier.
5281 // Check if it is a recognized one.
5282 int Id = MCLOHNameToId(Name);
5283
5284 if (Id == -1)
5285 return TokError("invalid identifier in directive");
5286 Kind = (MCLOHType)Id;
5287 }
5288 // Consume the identifier.
5289 Lex();
5290 // Get the number of arguments of this LOH.
5291 int NbArgs = MCLOHIdToNbArgs(Kind);
5292
5293 assert(NbArgs != -1 && "Invalid number of arguments")((NbArgs != -1 && "Invalid number of arguments") ? static_cast
<void> (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5293, __PRETTY_FUNCTION__))
;
5294
5295 SmallVector<MCSymbol *, 3> Args;
5296 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5297 StringRef Name;
5298 if (getParser().parseIdentifier(Name))
5299 return TokError("expected identifier in directive");
5300 Args.push_back(getContext().getOrCreateSymbol(Name));
5301
5302 if (Idx + 1 == NbArgs)
5303 break;
5304 if (parseToken(AsmToken::Comma,
5305 "unexpected token in '" + Twine(IDVal) + "' directive"))
5306 return true;
5307 }
5308 if (parseToken(AsmToken::EndOfStatement,
5309 "unexpected token in '" + Twine(IDVal) + "' directive"))
5310 return true;
5311
5312 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5313 return false;
5314}
5315
5316/// parseDirectiveLtorg
5317/// ::= .ltorg | .pool
5318bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5319 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5320 return true;
5321 getTargetStreamer().emitCurrentConstantPool();
5322 return false;
5323}
5324
5325/// parseDirectiveReq
5326/// ::= name .req registername
5327bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5328 MCAsmParser &Parser = getParser();
5329 Parser.Lex(); // Eat the '.req' token.
5330 SMLoc SRegLoc = getLoc();
5331 RegKind RegisterKind = RegKind::Scalar;
5332 unsigned RegNum;
5333 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5334
5335 if (ParseRes != MatchOperand_Success) {
5336 StringRef Kind;
5337 RegisterKind = RegKind::NeonVector;
5338 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5339
5340 if (ParseRes == MatchOperand_ParseFail)
5341 return true;
5342
5343 if (ParseRes == MatchOperand_Success && !Kind.empty())
5344 return Error(SRegLoc, "vector register without type specifier expected");
5345 }
5346
5347 if (ParseRes != MatchOperand_Success) {
5348 StringRef Kind;
5349 RegisterKind = RegKind::SVEDataVector;
5350 ParseRes =
5351 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5352
5353 if (ParseRes == MatchOperand_ParseFail)
5354 return true;
5355
5356 if (ParseRes == MatchOperand_Success && !Kind.empty())
5357 return Error(SRegLoc,
5358 "sve vector register without type specifier expected");
5359 }
5360
5361 if (ParseRes != MatchOperand_Success) {
5362 StringRef Kind;
5363 RegisterKind = RegKind::SVEPredicateVector;
5364 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5365
5366 if (ParseRes == MatchOperand_ParseFail)
5367 return true;
5368
5369 if (ParseRes == MatchOperand_Success && !Kind.empty())
5370 return Error(SRegLoc,
5371 "sve predicate register without type specifier expected");
5372 }
5373
5374 if (ParseRes != MatchOperand_Success)
5375 return Error(SRegLoc, "register name or alias expected");
5376
5377 // Shouldn't be anything else.
5378 if (parseToken(AsmToken::EndOfStatement,
5379 "unexpected input in .req directive"))
5380 return true;
5381
5382 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5383 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5384 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5385
5386 return false;
5387}
5388
5389/// parseDirectiveUneq
5390/// ::= .unreq registername
5391bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5392 MCAsmParser &Parser = getParser();
5393 if (getTok().isNot(AsmToken::Identifier))
5394 return TokError("unexpected input in .unreq directive.");
5395 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5396 Parser.Lex(); // Eat the identifier.
5397 if (parseToken(AsmToken::EndOfStatement))
5398 return addErrorSuffix("in '.unreq' directive");
5399 return false;
5400}
5401
5402bool
5403AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5404 AArch64MCExpr::VariantKind &ELFRefKind,
5405 MCSymbolRefExpr::VariantKind &DarwinRefKind,
5406 int64_t &Addend) {
5407 ELFRefKind = AArch64MCExpr::VK_INVALID;
5408 DarwinRefKind = MCSymbolRefExpr::VK_None;
5409 Addend = 0;
5410
5411 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5412 ELFRefKind = AE->getKind();
5413 Expr = AE->getSubExpr();
5414 }
5415
5416 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5417 if (SE) {
5418 // It's a simple symbol reference with no addend.
5419 DarwinRefKind = SE->getKind();
5420 return true;
5421 }
5422
5423 // Check that it looks like a symbol + an addend
5424 MCValue Res;
5425 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
5426 if (!Relocatable || !Res.getSymA() || Res.getSymB())
5427 return false;
5428
5429 DarwinRefKind = Res.getSymA()->getKind();
5430 Addend = Res.getConstant();
5431
5432 // It's some symbol reference + a constant addend, but really
5433 // shouldn't use both Darwin and ELF syntax.
5434 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5435 DarwinRefKind == MCSymbolRefExpr::VK_None;
5436}
5437
5438/// Force static initialization.
5439extern "C" void LLVMInitializeAArch64AsmParser() {
5440 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5441 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5442 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5443}
5444
5445#define GET_REGISTER_MATCHER
5446#define GET_SUBTARGET_FEATURE_NAME
5447#define GET_MATCHER_IMPLEMENTATION
5448#define GET_MNEMONIC_SPELL_CHECKER
5449#include "AArch64GenAsmMatcher.inc"
5450
5451// Define this matcher function after the auto-generated include so we
5452// have the match class enum definitions.
5453unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5454 unsigned Kind) {
5455 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5456 // If the kind is a token for a literal immediate, check if our asm
5457 // operand matches. This is for InstAliases which have a fixed-value
5458 // immediate in the syntax.
5459 int64_t ExpectedVal;
5460 switch (Kind) {
5461 default:
5462 return Match_InvalidOperand;
5463 case MCK__35_0:
5464 ExpectedVal = 0;
5465 break;
5466 case MCK__35_1:
5467 ExpectedVal = 1;
5468 break;
5469 case MCK__35_12:
5470 ExpectedVal = 12;
5471 break;
5472 case MCK__35_16:
5473 ExpectedVal = 16;
5474 break;
5475 case MCK__35_2:
5476 ExpectedVal = 2;
5477 break;
5478 case MCK__35_24:
5479 ExpectedVal = 24;
5480 break;
5481 case MCK__35_3:
5482 ExpectedVal = 3;
5483 break;
5484 case MCK__35_32:
5485 ExpectedVal = 32;
5486 break;
5487 case MCK__35_4:
5488 ExpectedVal = 4;
5489 break;
5490 case MCK__35_48:
5491 ExpectedVal = 48;
5492 break;
5493 case MCK__35_6:
5494 ExpectedVal = 6;
5495 break;
5496 case MCK__35_64:
5497 ExpectedVal = 64;
5498 break;
5499 case MCK__35_8:
5500 ExpectedVal = 8;
5501 break;
5502 }
5503 if (!Op.isImm())
5504 return Match_InvalidOperand;
5505 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5506 if (!CE)
5507 return Match_InvalidOperand;
5508 if (CE->getValue() == ExpectedVal)
5509 return Match_Success;
5510 return Match_InvalidOperand;
5511}
5512
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
  // Parses a consecutive even/odd GPR pair operand such as "x0, x1" or
  // "w2, w3": both registers must be the same width, the first must have
  // an even encoding, and the second must be the next encoding after it.
  // On success, the matching sequential-pair super register is pushed as
  // a single operand.

  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  // FirstReg is written by tryParseScalarRegister only on success, which
  // is checked immediately below before any use.
  unsigned FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register must be a plain W or X register.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register's encoding must be even.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  // SecondReg, like FirstReg, is only read after a success check.
  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  // The second register must be the consecutive odd register of the same
  // width as the first.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Map the first register onto the covering sequential-pair super
  // register; that pair register is the operand the instruction takes.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
        &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
        &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
5585
5586template <bool ParseShiftExtend, bool ParseSuffix>
5587OperandMatchResultTy
5588AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5589 const SMLoc S = getLoc();
5590 // Check for a SVE vector register specifier first.
5591 unsigned RegNum;
5592 StringRef Kind;
5593
5594 OperandMatchResultTy Res =
5595 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5596
5597 if (Res != MatchOperand_Success)
5598 return Res;
5599
5600 if (ParseSuffix && Kind.empty())
5601 return MatchOperand_NoMatch;
5602
5603 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5604 if (!KindRes)
5605 return MatchOperand_NoMatch;
5606
5607 unsigned ElementWidth = KindRes->second;
5608
5609 // No shift/extend is the default.
5610 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5611 Operands.push_back(AArch64Operand::CreateVectorReg(
5612 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5613
5614 OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5615 if (Res == MatchOperand_ParseFail)
5616 return MatchOperand_ParseFail;
5617 return MatchOperand_Success;
5618 }
5619
5620 // Eat the comma
5621 getParser().Lex();
5622
5623 // Match the shift
5624 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5625 Res = tryParseOptionalShiftExtend(ExtOpnd);
5626 if (Res != MatchOperand_Success)
5627 return Res;
5628
5629 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5630 Operands.push_back(AArch64Operand::CreateVectorReg(
5631 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5632 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5633 Ext->hasShiftExtendAmount()));
5634
5635 return MatchOperand_Success;
5636}
5637
5638OperandMatchResultTy
5639AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5640 MCAsmParser &Parser = getParser();
5641
5642 SMLoc SS = getLoc();
5643 const AsmToken &TokE = Parser.getTok();
5644 bool IsHash = TokE.is(AsmToken::Hash);
5645
5646 if (!IsHash && TokE.isNot(AsmToken::Identifier))
5647 return MatchOperand_NoMatch;
5648
5649 int64_t Pattern;
5650 if (IsHash) {
5651 Parser.Lex(); // Eat hash
5652
5653 // Parse the immediate operand.
5654 const MCExpr *ImmVal;
5655 SS = getLoc();
5656 if (Parser.parseExpression(ImmVal))
5657 return MatchOperand_ParseFail;
5658
5659 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5660 if (!MCE)
5661 return MatchOperand_ParseFail;
5662
5663 Pattern = MCE->getValue();
5664 } else {
5665 // Parse the pattern
5666 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5667 if (!Pat)
5668 return MatchOperand_NoMatch;
5669
5670 Parser.Lex();
5671 Pattern = Pat->Encoding;
5672 assert(Pattern >= 0 && Pattern < 32)((Pattern >= 0 && Pattern < 32) ? static_cast<
void> (0) : __assert_fail ("Pattern >= 0 && Pattern < 32"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 5672, __PRETTY_FUNCTION__))
;
5673 }
5674
5675 Operands.push_back(
5676 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5677 SS, getLoc(), getContext()));
5678
5679 return MatchOperand_Success;
5680}