Bug Summary

File: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 2336, column 22
1st function call argument is an uninitialized value
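
The flagged call is at line 2336, beyond the excerpt reproduced below. For context, a minimal hypothetical sketch of the pattern this checker reports (names and control flow are invented for illustration, not taken from AArch64AsmParser.cpp): a local is assigned on only some paths and then passed as the first argument of a call.

    #include <cstdio>

    static void consume(int value) { std::printf("%d\n", value); }

    static void example(bool haveValue) {
      int value;        // deliberately left uninitialized
      if (haveValue)
        value = 42;     // assigned on only one path
      consume(value);   // analyzer: 1st function call argument is an
                        // uninitialized value (on the haveValue == false path)
    }

    int main() {
      example(true);    // safe path; the analyzer still reasons about example(false)
      return 0;
    }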

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64AsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AsmParser -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/AArch64/AsmParser/.. -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AsmParser/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/AArch64/AsmParser -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "Utils/AArch64BaseInfo.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/StringExtras.h"
21#include "llvm/ADT/StringMap.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/StringSwitch.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/MC/MCContext.h"
26#include "llvm/MC/MCExpr.h"
27#include "llvm/MC/MCInst.h"
28#include "llvm/MC/MCLinkerOptimizationHint.h"
29#include "llvm/MC/MCObjectFileInfo.h"
30#include "llvm/MC/MCParser/MCAsmLexer.h"
31#include "llvm/MC/MCParser/MCAsmParser.h"
32#include "llvm/MC/MCParser/MCAsmParserExtension.h"
33#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34#include "llvm/MC/MCParser/MCTargetAsmParser.h"
35#include "llvm/MC/MCRegisterInfo.h"
36#include "llvm/MC/MCStreamer.h"
37#include "llvm/MC/MCSubtargetInfo.h"
38#include "llvm/MC/MCSymbol.h"
39#include "llvm/MC/MCTargetOptions.h"
40#include "llvm/MC/SubtargetFeature.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/Compiler.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MathExtras.h"
45#include "llvm/Support/SMLoc.h"
46#include "llvm/Support/TargetParser.h"
47#include "llvm/Support/TargetRegistry.h"
48#include "llvm/Support/raw_ostream.h"
49#include <cassert>
50#include <cctype>
51#include <cstdint>
52#include <cstdio>
53#include <string>
54#include <tuple>
55#include <utility>
56#include <vector>
57
58using namespace llvm;
59
60namespace {
61
62enum class RegKind {
63 Scalar,
64 NeonVector,
65 SVEDataVector,
66 SVEPredicateVector
67};
68
69class AArch64AsmParser : public MCTargetAsmParser {
70private:
71 StringRef Mnemonic; ///< Instruction mnemonic.
72
 73 // Map of register aliases registered via the .req directive.
74 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
75
76 AArch64TargetStreamer &getTargetStreamer() {
77 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
78 return static_cast<AArch64TargetStreamer &>(TS);
79 }
80
81 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
82
83 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
84 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
85 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
86 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
87 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
88 int tryParseRegister();
89 int tryMatchVectorRegister(StringRef &Kind, bool expected);
90 bool parseRegister(OperandVector &Operands);
91 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
92 bool parseVectorList(OperandVector &Operands);
93 bool parseOperand(OperandVector &Operands, bool isCondCode,
94 bool invertCondCode);
95
96 bool showMatchError(SMLoc Loc, unsigned ErrCode, OperandVector &Operands);
97
98 bool parseDirectiveArch(SMLoc L);
99 bool parseDirectiveCPU(SMLoc L);
100 bool parseDirectiveWord(unsigned Size, SMLoc L);
101 bool parseDirectiveInst(SMLoc L);
102
103 bool parseDirectiveTLSDescCall(SMLoc L);
104
105 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
106 bool parseDirectiveLtorg(SMLoc L);
107
108 bool parseDirectiveReq(StringRef Name, SMLoc L);
109 bool parseDirectiveUnreq(SMLoc L);
110
111 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
112 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
113 OperandVector &Operands, MCStreamer &Out,
114 uint64_t &ErrorInfo,
115 bool MatchingInlineAsm) override;
116/// @name Auto-generated Match Functions
117/// {
118
119#define GET_ASSEMBLER_HEADER
120#include "AArch64GenAsmMatcher.inc"
121
122 /// }
123
124 OperandMatchResultTy tryParseSVERegister(int &Reg, StringRef &Kind,
125 RegKind MatchKind);
126 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
127 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
128 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
129 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
130 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
131 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
132 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
133 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
134 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
135 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
136 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
137 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
138 bool tryParseNeonVectorRegister(OperandVector &Operands);
139 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
140 template <bool ParseSuffix>
141 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
142 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
143 OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
144
145public:
146 enum AArch64MatchResultTy {
147 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
148#define GET_OPERAND_DIAGNOSTIC_TYPES
149#include "AArch64GenAsmMatcher.inc"
150 };
151 bool IsILP32;
152
153 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
154 const MCInstrInfo &MII, const MCTargetOptions &Options)
155 : MCTargetAsmParser(Options, STI, MII) {
156 IsILP32 = Options.getABIName() == "ilp32";
157 MCAsmParserExtension::Initialize(Parser);
158 MCStreamer &S = getParser().getStreamer();
159 if (S.getTargetStreamer() == nullptr)
160 new AArch64TargetStreamer(S);
161
162 // Initialize the set of available features.
163 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
164 }
165
166 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
167 SMLoc NameLoc, OperandVector &Operands) override;
168 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
169 bool ParseDirective(AsmToken DirectiveID) override;
170 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
171 unsigned Kind) override;
172
173 static bool classifySymbolRef(const MCExpr *Expr,
174 AArch64MCExpr::VariantKind &ELFRefKind,
175 MCSymbolRefExpr::VariantKind &DarwinRefKind,
176 int64_t &Addend);
177};
178
179/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
180/// instruction.
181class AArch64Operand : public MCParsedAsmOperand {
182private:
183 enum KindTy {
184 k_Immediate,
185 k_ShiftedImm,
186 k_CondCode,
187 k_Register,
188 k_VectorList,
189 k_VectorIndex,
190 k_Token,
191 k_SysReg,
192 k_SysCR,
193 k_Prefetch,
194 k_ShiftExtend,
195 k_FPImm,
196 k_Barrier,
197 k_PSBHint,
198 } Kind;
199
200 SMLoc StartLoc, EndLoc;
201
202 struct TokOp {
203 const char *Data;
204 unsigned Length;
205 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
206 };
207
208 struct RegOp {
209 unsigned RegNum;
210 RegKind Kind;
211
212 int ElementWidth;
213 };
214
215 struct VectorListOp {
216 unsigned RegNum;
217 unsigned Count;
218 unsigned NumElements;
219 unsigned ElementKind;
220 };
221
222 struct VectorIndexOp {
223 unsigned Val;
224 };
225
226 struct ImmOp {
227 const MCExpr *Val;
228 };
229
230 struct ShiftedImmOp {
231 const MCExpr *Val;
232 unsigned ShiftAmount;
233 };
234
235 struct CondCodeOp {
236 AArch64CC::CondCode Code;
237 };
238
239 struct FPImmOp {
240 unsigned Val; // Encoded 8-bit representation.
241 };
242
243 struct BarrierOp {
244 const char *Data;
245 unsigned Length;
246 unsigned Val; // Not the enum since not all values have names.
247 };
248
249 struct SysRegOp {
250 const char *Data;
251 unsigned Length;
252 uint32_t MRSReg;
253 uint32_t MSRReg;
254 uint32_t PStateField;
255 };
256
257 struct SysCRImmOp {
258 unsigned Val;
259 };
260
261 struct PrefetchOp {
262 const char *Data;
263 unsigned Length;
264 unsigned Val;
265 };
266
267 struct PSBHintOp {
268 const char *Data;
269 unsigned Length;
270 unsigned Val;
271 };
272
273 struct ShiftExtendOp {
274 AArch64_AM::ShiftExtendType Type;
275 unsigned Amount;
276 bool HasExplicitAmount;
277 };
278
279 struct ExtendOp {
280 unsigned Val;
281 };
282
283 union {
284 struct TokOp Tok;
285 struct RegOp Reg;
286 struct VectorListOp VectorList;
287 struct VectorIndexOp VectorIndex;
288 struct ImmOp Imm;
289 struct ShiftedImmOp ShiftedImm;
290 struct CondCodeOp CondCode;
291 struct FPImmOp FPImm;
292 struct BarrierOp Barrier;
293 struct SysRegOp SysReg;
294 struct SysCRImmOp SysCRImm;
295 struct PrefetchOp Prefetch;
296 struct PSBHintOp PSBHint;
297 struct ShiftExtendOp ShiftExtend;
298 };
299
 300 // Keep the MCContext around as the MCExprs may need to be manipulated during
301 // the add<>Operands() calls.
302 MCContext &Ctx;
303
304public:
305 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
306
307 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
308 Kind = o.Kind;
309 StartLoc = o.StartLoc;
310 EndLoc = o.EndLoc;
311 switch (Kind) {
312 case k_Token:
313 Tok = o.Tok;
314 break;
315 case k_Immediate:
316 Imm = o.Imm;
317 break;
318 case k_ShiftedImm:
319 ShiftedImm = o.ShiftedImm;
320 break;
321 case k_CondCode:
322 CondCode = o.CondCode;
323 break;
324 case k_FPImm:
325 FPImm = o.FPImm;
326 break;
327 case k_Barrier:
328 Barrier = o.Barrier;
329 break;
330 case k_Register:
331 Reg = o.Reg;
332 break;
333 case k_VectorList:
334 VectorList = o.VectorList;
335 break;
336 case k_VectorIndex:
337 VectorIndex = o.VectorIndex;
338 break;
339 case k_SysReg:
340 SysReg = o.SysReg;
341 break;
342 case k_SysCR:
343 SysCRImm = o.SysCRImm;
344 break;
345 case k_Prefetch:
346 Prefetch = o.Prefetch;
347 break;
348 case k_PSBHint:
349 PSBHint = o.PSBHint;
350 break;
351 case k_ShiftExtend:
352 ShiftExtend = o.ShiftExtend;
353 break;
354 }
355 }
356
357 /// getStartLoc - Get the location of the first token of this operand.
358 SMLoc getStartLoc() const override { return StartLoc; }
359 /// getEndLoc - Get the location of the last token of this operand.
360 SMLoc getEndLoc() const override { return EndLoc; }
361
362 StringRef getToken() const {
 363 assert(Kind == k_Token && "Invalid access!");
364 return StringRef(Tok.Data, Tok.Length);
365 }
366
367 bool isTokenSuffix() const {
 368 assert(Kind == k_Token && "Invalid access!");
369 return Tok.IsSuffix;
370 }
371
372 const MCExpr *getImm() const {
 373 assert(Kind == k_Immediate && "Invalid access!");
374 return Imm.Val;
375 }
376
377 const MCExpr *getShiftedImmVal() const {
 378 assert(Kind == k_ShiftedImm && "Invalid access!");
379 return ShiftedImm.Val;
380 }
381
382 unsigned getShiftedImmShift() const {
 383 assert(Kind == k_ShiftedImm && "Invalid access!");
384 return ShiftedImm.ShiftAmount;
385 }
386
387 AArch64CC::CondCode getCondCode() const {
 388 assert(Kind == k_CondCode && "Invalid access!");
389 return CondCode.Code;
390 }
391
392 unsigned getFPImm() const {
 393 assert(Kind == k_FPImm && "Invalid access!");
394 return FPImm.Val;
395 }
396
397 unsigned getBarrier() const {
 398 assert(Kind == k_Barrier && "Invalid access!");
399 return Barrier.Val;
400 }
401
402 StringRef getBarrierName() const {
 403 assert(Kind == k_Barrier && "Invalid access!");
404 return StringRef(Barrier.Data, Barrier.Length);
405 }
406
407 unsigned getReg() const override {
 408 assert(Kind == k_Register && "Invalid access!");
409 return Reg.RegNum;
410 }
411
412 unsigned getVectorListStart() const {
 413 assert(Kind == k_VectorList && "Invalid access!");
414 return VectorList.RegNum;
415 }
416
417 unsigned getVectorListCount() const {
 418 assert(Kind == k_VectorList && "Invalid access!");
419 return VectorList.Count;
420 }
421
422 unsigned getVectorIndex() const {
 423 assert(Kind == k_VectorIndex && "Invalid access!");
424 return VectorIndex.Val;
425 }
426
427 StringRef getSysReg() const {
 428 assert(Kind == k_SysReg && "Invalid access!");
429 return StringRef(SysReg.Data, SysReg.Length);
430 }
431
432 unsigned getSysCR() const {
 433 assert(Kind == k_SysCR && "Invalid access!");
434 return SysCRImm.Val;
435 }
436
437 unsigned getPrefetch() const {
 438 assert(Kind == k_Prefetch && "Invalid access!");
439 return Prefetch.Val;
440 }
441
442 unsigned getPSBHint() const {
 443 assert(Kind == k_PSBHint && "Invalid access!");
444 return PSBHint.Val;
445 }
446
447 StringRef getPSBHintName() const {
 448 assert(Kind == k_PSBHint && "Invalid access!");
449 return StringRef(PSBHint.Data, PSBHint.Length);
450 }
451
452 StringRef getPrefetchName() const {
 453 assert(Kind == k_Prefetch && "Invalid access!");
454 return StringRef(Prefetch.Data, Prefetch.Length);
455 }
456
457 AArch64_AM::ShiftExtendType getShiftExtendType() const {
 458 assert(Kind == k_ShiftExtend && "Invalid access!");
459 return ShiftExtend.Type;
460 }
461
462 unsigned getShiftExtendAmount() const {
 463 assert(Kind == k_ShiftExtend && "Invalid access!");
464 return ShiftExtend.Amount;
465 }
466
467 bool hasShiftExtendAmount() const {
 468 assert(Kind == k_ShiftExtend && "Invalid access!");
469 return ShiftExtend.HasExplicitAmount;
470 }
471
472 bool isImm() const override { return Kind == k_Immediate; }
473 bool isMem() const override { return false; }
474
475 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
476
477 template <int Bits, int Scale> bool isSImmScaled() const {
478 if (!isImm())
479 return false;
480 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
481 if (!MCE)
482 return false;
483
484 int64_t Shift = Bits - 1;
485 int64_t MinVal = (int64_t(1) << Shift) * -Scale;
486 int64_t MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
487
488 int64_t Val = MCE->getValue();
489 return Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0;
490 }
491
492 bool isSVEPattern() const {
493 if (!isImm())
494 return false;
495 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
496 if (!MCE)
497 return false;
498 int64_t Val = MCE->getValue();
499 return Val >= 0 && Val < 32;
500 }
501
502 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
503 AArch64MCExpr::VariantKind ELFRefKind;
504 MCSymbolRefExpr::VariantKind DarwinRefKind;
505 int64_t Addend;
506 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
507 Addend)) {
508 // If we don't understand the expression, assume the best and
509 // let the fixup and relocation code deal with it.
510 return true;
511 }
512
513 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
514 ELFRefKind == AArch64MCExpr::VK_LO12 ||
515 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
516 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
517 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
518 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
519 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
520 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
521 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
522 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
523 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
524 // Note that we don't range-check the addend. It's adjusted modulo page
525 // size when converted, so there is no "out of range" condition when using
526 // @pageoff.
527 return Addend >= 0 && (Addend % Scale) == 0;
528 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
529 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
530 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
531 return Addend == 0;
532 }
533
534 return false;
535 }
536
537 template <int Scale> bool isUImm12Offset() const {
538 if (!isImm())
539 return false;
540
541 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
542 if (!MCE)
543 return isSymbolicUImm12Offset(getImm(), Scale);
544
545 int64_t Val = MCE->getValue();
546 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
547 }
548
549 template <int N, int M>
550 bool isImmInRange() const {
551 if (!isImm())
552 return false;
553 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
554 if (!MCE)
555 return false;
556 int64_t Val = MCE->getValue();
557 return (Val >= N && Val <= M);
558 }
559
560 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
561 // a logical immediate can always be represented when inverted.
562 template <typename T>
563 bool isLogicalImm() const {
564 if (!isImm())
565 return false;
566 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567 if (!MCE)
568 return false;
569
570 int64_t Val = MCE->getValue();
571 int64_t SVal = typename std::make_signed<T>::type(Val);
572 int64_t UVal = typename std::make_unsigned<T>::type(Val);
573 if (Val != SVal && Val != UVal)
574 return false;
575
576 return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
577 }
578
579 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
580
581 bool isAddSubImm() const {
582 if (!isShiftedImm() && !isImm())
583 return false;
584
585 const MCExpr *Expr;
586
587 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
588 if (isShiftedImm()) {
589 unsigned Shift = ShiftedImm.ShiftAmount;
590 Expr = ShiftedImm.Val;
591 if (Shift != 0 && Shift != 12)
592 return false;
593 } else {
594 Expr = getImm();
595 }
596
597 AArch64MCExpr::VariantKind ELFRefKind;
598 MCSymbolRefExpr::VariantKind DarwinRefKind;
599 int64_t Addend;
600 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
601 DarwinRefKind, Addend)) {
602 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
603 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
604 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
605 || ELFRefKind == AArch64MCExpr::VK_LO12
606 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
607 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
608 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
609 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
610 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
611 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
612 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
613 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
614 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
615 }
616
617 // If it's a constant, it should be a real immediate in range:
618 if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
619 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
620
621 // If it's an expression, we hope for the best and let the fixup/relocation
622 // code deal with it.
623 return true;
624 }
625
626 bool isAddSubImmNeg() const {
627 if (!isShiftedImm() && !isImm())
628 return false;
629
630 const MCExpr *Expr;
631
632 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
633 if (isShiftedImm()) {
634 unsigned Shift = ShiftedImm.ShiftAmount;
635 Expr = ShiftedImm.Val;
636 if (Shift != 0 && Shift != 12)
637 return false;
638 } else
639 Expr = getImm();
640
641 // Otherwise it should be a real negative immediate in range:
642 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
643 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
644 }
645
646 bool isCondCode() const { return Kind == k_CondCode; }
647
648 bool isSIMDImmType10() const {
649 if (!isImm())
650 return false;
651 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 if (!MCE)
653 return false;
654 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
655 }
656
657 template<int N>
658 bool isBranchTarget() const {
659 if (!isImm())
660 return false;
661 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
662 if (!MCE)
663 return true;
664 int64_t Val = MCE->getValue();
665 if (Val & 0x3)
666 return false;
 667 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
668 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
669 }
670
671 bool
672 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
673 if (!isImm())
674 return false;
675
676 AArch64MCExpr::VariantKind ELFRefKind;
677 MCSymbolRefExpr::VariantKind DarwinRefKind;
678 int64_t Addend;
679 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
680 DarwinRefKind, Addend)) {
681 return false;
682 }
683 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
684 return false;
685
686 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
687 if (ELFRefKind == AllowedModifiers[i])
688 return Addend == 0;
689 }
690
691 return false;
692 }
693
694 bool isMovZSymbolG3() const {
695 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
696 }
697
698 bool isMovZSymbolG2() const {
699 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
700 AArch64MCExpr::VK_TPREL_G2,
701 AArch64MCExpr::VK_DTPREL_G2});
702 }
703
704 bool isMovZSymbolG1() const {
705 return isMovWSymbol({
706 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
707 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
708 AArch64MCExpr::VK_DTPREL_G1,
709 });
710 }
711
712 bool isMovZSymbolG0() const {
713 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
714 AArch64MCExpr::VK_TPREL_G0,
715 AArch64MCExpr::VK_DTPREL_G0});
716 }
717
718 bool isMovKSymbolG3() const {
719 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
720 }
721
722 bool isMovKSymbolG2() const {
723 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
724 }
725
726 bool isMovKSymbolG1() const {
727 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
728 AArch64MCExpr::VK_TPREL_G1_NC,
729 AArch64MCExpr::VK_DTPREL_G1_NC});
730 }
731
732 bool isMovKSymbolG0() const {
733 return isMovWSymbol(
734 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
735 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
736 }
737
738 template<int RegWidth, int Shift>
739 bool isMOVZMovAlias() const {
740 if (!isImm()) return false;
741
742 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
743 if (!CE) return false;
744 uint64_t Value = CE->getValue();
745
746 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
747 }
748
749 template<int RegWidth, int Shift>
750 bool isMOVNMovAlias() const {
751 if (!isImm()) return false;
752
753 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
754 if (!CE) return false;
755 uint64_t Value = CE->getValue();
756
757 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
758 }
759
760 bool isFPImm() const { return Kind == k_FPImm; }
761 bool isBarrier() const { return Kind == k_Barrier; }
762 bool isSysReg() const { return Kind == k_SysReg; }
763
764 bool isMRSSystemRegister() const {
765 if (!isSysReg()) return false;
766
767 return SysReg.MRSReg != -1U;
768 }
769
770 bool isMSRSystemRegister() const {
771 if (!isSysReg()) return false;
772 return SysReg.MSRReg != -1U;
773 }
774
775 bool isSystemPStateFieldWithImm0_1() const {
776 if (!isSysReg()) return false;
777 return (SysReg.PStateField == AArch64PState::PAN ||
778 SysReg.PStateField == AArch64PState::UAO);
779 }
780
781 bool isSystemPStateFieldWithImm0_15() const {
782 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
783 return SysReg.PStateField != -1U;
784 }
785
786 bool isReg() const override {
787 return Kind == k_Register;
788 }
789
790 bool isScalarReg() const {
791 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
792 }
793
794 bool isNeonVectorReg() const {
795 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
796 }
797
798 bool isNeonVectorRegLo() const {
799 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
800 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
801 Reg.RegNum);
802 }
803
804 template <unsigned Class> bool isSVEVectorReg() const {
805 RegKind RK;
806 switch (Class) {
807 case AArch64::ZPRRegClassID:
808 RK = RegKind::SVEDataVector;
809 break;
810 case AArch64::PPRRegClassID:
811 case AArch64::PPR_3bRegClassID:
812 RK = RegKind::SVEPredicateVector;
813 break;
814 default:
 815 llvm_unreachable("Unsupport register class");
816 }
817
818 return (Kind == k_Register && Reg.Kind == RK) &&
819 AArch64MCRegisterClasses[Class].contains(getReg());
820 }
821
822 template <int ElementWidth, unsigned Class>
823 bool isSVEVectorRegOfWidth() const {
824 return isSVEVectorReg<Class>() &&
825 (ElementWidth == -1 || Reg.ElementWidth == ElementWidth);
826 }
827
828 bool isGPR32as64() const {
829 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
830 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
831 }
832
833 bool isWSeqPair() const {
834 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
835 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
836 Reg.RegNum);
837 }
838
839 bool isXSeqPair() const {
840 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
841 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
842 Reg.RegNum);
843 }
844
845 bool isGPR64sp0() const {
846 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
847 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
848 }
849
850 template<int64_t Angle, int64_t Remainder>
851 bool isComplexRotation() const {
852 if (!isImm()) return false;
853
854 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
855 if (!CE) return false;
856 uint64_t Value = CE->getValue();
857
858 return (Value % Angle == Remainder && Value <= 270);
859 }
860
861 /// Is this a vector list with the type implicit (presumably attached to the
862 /// instruction itself)?
863 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
864 return Kind == k_VectorList && VectorList.Count == NumRegs &&
865 !VectorList.ElementKind;
866 }
867
868 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
869 bool isTypedVectorList() const {
870 if (Kind != k_VectorList)
871 return false;
872 if (VectorList.Count != NumRegs)
873 return false;
874 if (VectorList.ElementKind != ElementKind)
875 return false;
876 return VectorList.NumElements == NumElements;
877 }
878
879 bool isVectorIndex1() const {
880 return Kind == k_VectorIndex && VectorIndex.Val == 1;
881 }
882
883 bool isVectorIndexB() const {
884 return Kind == k_VectorIndex && VectorIndex.Val < 16;
885 }
886
887 bool isVectorIndexH() const {
888 return Kind == k_VectorIndex && VectorIndex.Val < 8;
889 }
890
891 bool isVectorIndexS() const {
892 return Kind == k_VectorIndex && VectorIndex.Val < 4;
893 }
894
895 bool isVectorIndexD() const {
896 return Kind == k_VectorIndex && VectorIndex.Val < 2;
897 }
898
899 bool isToken() const override { return Kind == k_Token; }
900
901 bool isTokenEqual(StringRef Str) const {
902 return Kind == k_Token && getToken() == Str;
903 }
904 bool isSysCR() const { return Kind == k_SysCR; }
905 bool isPrefetch() const { return Kind == k_Prefetch; }
906 bool isPSBHint() const { return Kind == k_PSBHint; }
907 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
908 bool isShifter() const {
909 if (!isShiftExtend())
910 return false;
911
912 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
913 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
914 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
915 ST == AArch64_AM::MSL);
916 }
917 bool isExtend() const {
918 if (!isShiftExtend())
919 return false;
920
921 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
922 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
923 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
924 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
925 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
926 ET == AArch64_AM::LSL) &&
927 getShiftExtendAmount() <= 4;
928 }
929
930 bool isExtend64() const {
931 if (!isExtend())
932 return false;
933 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
934 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
935 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
936 }
937
938 bool isExtendLSL64() const {
939 if (!isExtend())
940 return false;
941 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
942 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
943 ET == AArch64_AM::LSL) &&
944 getShiftExtendAmount() <= 4;
945 }
946
947 template<int Width> bool isMemXExtend() const {
948 if (!isExtend())
949 return false;
950 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
951 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
952 (getShiftExtendAmount() == Log2_32(Width / 8) ||
953 getShiftExtendAmount() == 0);
954 }
955
956 template<int Width> bool isMemWExtend() const {
957 if (!isExtend())
958 return false;
959 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
960 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
961 (getShiftExtendAmount() == Log2_32(Width / 8) ||
962 getShiftExtendAmount() == 0);
963 }
964
965 template <unsigned width>
966 bool isArithmeticShifter() const {
967 if (!isShifter())
968 return false;
969
970 // An arithmetic shifter is LSL, LSR, or ASR.
971 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
972 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
973 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
974 }
975
976 template <unsigned width>
977 bool isLogicalShifter() const {
978 if (!isShifter())
979 return false;
980
981 // A logical shifter is LSL, LSR, ASR or ROR.
982 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
983 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
984 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
985 getShiftExtendAmount() < width;
986 }
987
988 bool isMovImm32Shifter() const {
989 if (!isShifter())
990 return false;
991
992 // A MOVi shifter is LSL of 0, 16, 32, or 48.
993 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
994 if (ST != AArch64_AM::LSL)
995 return false;
996 uint64_t Val = getShiftExtendAmount();
997 return (Val == 0 || Val == 16);
998 }
999
1000 bool isMovImm64Shifter() const {
1001 if (!isShifter())
1002 return false;
1003
1004 // A MOVi shifter is LSL of 0 or 16.
1005 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1006 if (ST != AArch64_AM::LSL)
1007 return false;
1008 uint64_t Val = getShiftExtendAmount();
1009 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1010 }
1011
1012 bool isLogicalVecShifter() const {
1013 if (!isShifter())
1014 return false;
1015
1016 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1017 unsigned Shift = getShiftExtendAmount();
1018 return getShiftExtendType() == AArch64_AM::LSL &&
1019 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1020 }
1021
1022 bool isLogicalVecHalfWordShifter() const {
1023 if (!isLogicalVecShifter())
1024 return false;
1025
1026 // A logical vector shifter is a left shift by 0 or 8.
1027 unsigned Shift = getShiftExtendAmount();
1028 return getShiftExtendType() == AArch64_AM::LSL &&
1029 (Shift == 0 || Shift == 8);
1030 }
1031
1032 bool isMoveVecShifter() const {
1033 if (!isShiftExtend())
1034 return false;
1035
1036 // A logical vector shifter is a left shift by 8 or 16.
1037 unsigned Shift = getShiftExtendAmount();
1038 return getShiftExtendType() == AArch64_AM::MSL &&
1039 (Shift == 8 || Shift == 16);
1040 }
1041
1042 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1043 // to LDUR/STUR when the offset is not legal for the former but is for
1044 // the latter. As such, in addition to checking for being a legal unscaled
1045 // address, also check that it is not a legal scaled address. This avoids
1046 // ambiguity in the matcher.
1047 template<int Width>
1048 bool isSImm9OffsetFB() const {
1049 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1050 }
1051
1052 bool isAdrpLabel() const {
1053 // Validation was handled during parsing, so we just sanity check that
1054 // something didn't go haywire.
1055 if (!isImm())
1056 return false;
1057
1058 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1059 int64_t Val = CE->getValue();
1060 int64_t Min = - (4096 * (1LL << (21 - 1)));
1061 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1062 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1063 }
1064
1065 return true;
1066 }
1067
1068 bool isAdrLabel() const {
1069 // Validation was handled during parsing, so we just sanity check that
1070 // something didn't go haywire.
1071 if (!isImm())
1072 return false;
1073
1074 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1075 int64_t Val = CE->getValue();
1076 int64_t Min = - (1LL << (21 - 1));
1077 int64_t Max = ((1LL << (21 - 1)) - 1);
1078 return Val >= Min && Val <= Max;
1079 }
1080
1081 return true;
1082 }
1083
1084 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1085 // Add as immediates when possible. Null MCExpr = 0.
1086 if (!Expr)
1087 Inst.addOperand(MCOperand::createImm(0));
1088 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1089 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1090 else
1091 Inst.addOperand(MCOperand::createExpr(Expr));
1092 }
1093
1094 void addRegOperands(MCInst &Inst, unsigned N) const {
 1095 assert(N == 1 && "Invalid number of operands!");
1096 Inst.addOperand(MCOperand::createReg(getReg()));
1097 }
1098
1099 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
 1100 assert(N == 1 && "Invalid number of operands!");
 1101 assert(
 1102 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1103
1104 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1105 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1106 RI->getEncodingValue(getReg()));
1107
1108 Inst.addOperand(MCOperand::createReg(Reg));
1109 }
1110
1111 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
 1112 assert(N == 1 && "Invalid number of operands!");
 1113 assert(
 1114 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1115 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1116 }
1117
1118 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
 1119 assert(N == 1 && "Invalid number of operands!");
 1120 assert(
 1121 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1122 Inst.addOperand(MCOperand::createReg(getReg()));
1123 }
1124
1125 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
 1126 assert(N == 1 && "Invalid number of operands!");
1127 Inst.addOperand(MCOperand::createReg(getReg()));
1128 }
1129
1130 template <unsigned NumRegs>
1131 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
 1132 assert(N == 1 && "Invalid number of operands!");
1133 static const unsigned FirstRegs[] = { AArch64::D0,
1134 AArch64::D0_D1,
1135 AArch64::D0_D1_D2,
1136 AArch64::D0_D1_D2_D3 };
1137 unsigned FirstReg = FirstRegs[NumRegs - 1];
1138
1139 Inst.addOperand(
1140 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1141 }
1142
1143 template <unsigned NumRegs>
1144 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
 1145 assert(N == 1 && "Invalid number of operands!");
1146 static const unsigned FirstRegs[] = { AArch64::Q0,
1147 AArch64::Q0_Q1,
1148 AArch64::Q0_Q1_Q2,
1149 AArch64::Q0_Q1_Q2_Q3 };
1150 unsigned FirstReg = FirstRegs[NumRegs - 1];
1151
1152 Inst.addOperand(
1153 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1154 }
1155
1156 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
 1157 assert(N == 1 && "Invalid number of operands!");
1158 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1159 }
1160
1161 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
 1162 assert(N == 1 && "Invalid number of operands!");
1163 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1164 }
1165
1166 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
 1167 assert(N == 1 && "Invalid number of operands!");
1168 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1169 }
1170
1171 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
 1172 assert(N == 1 && "Invalid number of operands!");
1173 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1174 }
1175
1176 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
 1177 assert(N == 1 && "Invalid number of operands!");
1178 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1179 }
1180
1181 void addImmOperands(MCInst &Inst, unsigned N) const {
 1182 assert(N == 1 && "Invalid number of operands!");
1183 // If this is a pageoff symrefexpr with an addend, adjust the addend
1184 // to be only the page-offset portion. Otherwise, just add the expr
1185 // as-is.
1186 addExpr(Inst, getImm());
1187 }
1188
1189 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
 1190 assert(N == 2 && "Invalid number of operands!");
1191 if (isShiftedImm()) {
1192 addExpr(Inst, getShiftedImmVal());
1193 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1194 } else {
1195 addExpr(Inst, getImm());
1196 Inst.addOperand(MCOperand::createImm(0));
1197 }
1198 }
1199
1200 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
 1201 assert(N == 2 && "Invalid number of operands!");
1202
1203 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1204 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1205 int64_t Val = -CE->getValue();
1206 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1207
1208 Inst.addOperand(MCOperand::createImm(Val));
1209 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1210 }
1211
1212 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
 1213 assert(N == 1 && "Invalid number of operands!");
1214 Inst.addOperand(MCOperand::createImm(getCondCode()));
1215 }
1216
1217 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
 1218 assert(N == 1 && "Invalid number of operands!");
1219 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1220 if (!MCE)
1221 addExpr(Inst, getImm());
1222 else
1223 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1224 }
1225
1226 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1227 addImmOperands(Inst, N);
1228 }
1229
1230 template<int Scale>
1231 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
 1232 assert(N == 1 && "Invalid number of operands!");
1233 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1234
1235 if (!MCE) {
1236 Inst.addOperand(MCOperand::createExpr(getImm()));
1237 return;
1238 }
1239 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1240 }
1241
1242 void addSImm9Operands(MCInst &Inst, unsigned N) const {
 1243 assert(N == 1 && "Invalid number of operands!");
1244 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1245 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1246 }
1247
1248 void addSImm10s8Operands(MCInst &Inst, unsigned N) const {
 1249 assert(N == 1 && "Invalid number of operands!");
1250 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1251 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1252 }
1253
1254 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
 1255 assert(N == 1 && "Invalid number of operands!");
1256 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1257 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1258 }
1259
1260 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1261 assert(N == 1 && "Invalid number of operands!");
1262 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1263 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1264 }
1265
1266 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1267 assert(N == 1 && "Invalid number of operands!");
1268 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1269 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1270 }
1271
1272 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1275 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1276 }
1277
1278 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1279 assert(N == 1 && "Invalid number of operands!");
1280 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1281 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1282 }
1283
1284 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1285 assert(N == 1 && "Invalid number of operands!");
1286 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1287 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1288 }
1289
1290 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1291 assert(N == 1 && "Invalid number of operands!");
1292 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1293 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1294 }
1295
1296 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1299 assert(MCE && "Invalid constant immediate operand!");
1300 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1301 }
1302
1303 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1306 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1307 }
1308
1309 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
1311 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1312 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313 }
1314
1315 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1316 assert(N == 1 && "Invalid number of operands!");
1317 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1318 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1319 }
1320
1321 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1323 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1324 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1325 }
1326
1327 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1328 assert(N == 1 && "Invalid number of operands!");
1329 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1330 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1331 }
1332
1333 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1337 }
1338
1339 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1340 assert(N == 1 && "Invalid number of operands!");
1341 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1342 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1343 }
1344
1345 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1348 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1349 }
1350
1351 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1352 assert(N == 1 && "Invalid number of operands!");
1353 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1354 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1355 }
1356
1357 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1361 }
1362
1363 template <typename T>
1364 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 typename std::make_unsigned<T>::type Val = MCE->getValue();
1368 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1369 Inst.addOperand(MCOperand::createImm(encoding));
1370 }
1371
1372 template <typename T>
1373 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1374 assert(N == 1 && "Invalid number of operands!");
1375 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1376 typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1377 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1378 Inst.addOperand(MCOperand::createImm(encoding));
1379 }
1380
1381 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1382 assert(N == 1 && "Invalid number of operands!");
1383 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1384 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1385 Inst.addOperand(MCOperand::createImm(encoding));
1386 }
1387
1388 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1389 // Branch operands don't encode the low bits, so shift them off
1390 // here. If it's a label, however, just put it on directly as there's
1391 // not enough information now to do anything.
1392 assert(N == 1 && "Invalid number of operands!");
1393 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1394 if (!MCE) {
1395 addExpr(Inst, getImm());
1396 return;
1397 }
1398 assert(MCE && "Invalid constant immediate operand!");
1399 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1400 }
1401
1402 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1403 // Branch operands don't encode the low bits, so shift them off
1404 // here. If it's a label, however, just put it on directly as there's
1405 // not enough information now to do anything.
1406 assert(N == 1 && "Invalid number of operands!");
1407 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 if (!MCE) {
1409 addExpr(Inst, getImm());
1410 return;
1411 }
1412 assert(MCE && "Invalid constant immediate operand!");
1413 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1414 }
1415
1416 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1417 // Branch operands don't encode the low bits, so shift them off
1418 // here. If it's a label, however, just put it on directly as there's
1419 // not enough information now to do anything.
1420 assert(N == 1 && "Invalid number of operands!");
1421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1422 if (!MCE) {
1423 addExpr(Inst, getImm());
1424 return;
1425 }
1426 assert(MCE && "Invalid constant immediate operand!");
1427 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1428 }
1429
1430 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1431 assert(N == 1 && "Invalid number of operands!");
1432 Inst.addOperand(MCOperand::createImm(getFPImm()));
1433 }
1434
1435 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1436 assert(N == 1 && "Invalid number of operands!");
1437 Inst.addOperand(MCOperand::createImm(getBarrier()));
1438 }
1439
1440 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1441 assert(N == 1 && "Invalid number of operands!");
1442
1443 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1444 }
1445
1446 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1447 assert(N == 1 && "Invalid number of operands!");
1448
1449 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1450 }
1451
1452 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1453 assert(N == 1 && "Invalid number of operands!");
1454
1455 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1456 }
1457
1458 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1459 assert(N == 1 && "Invalid number of operands!");
1460
1461 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1462 }
1463
1464 void addSysCROperands(MCInst &Inst, unsigned N) const {
1465 assert(N == 1 && "Invalid number of operands!");
1466 Inst.addOperand(MCOperand::createImm(getSysCR()));
1467 }
1468
1469 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1470 assert(N == 1 && "Invalid number of operands!");
1471 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1472 }
1473
1474 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1475 assert(N == 1 && "Invalid number of operands!");
1476 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1477 }
1478
1479 void addShifterOperands(MCInst &Inst, unsigned N) const {
1480 assert(N == 1 && "Invalid number of operands!");
1481 unsigned Imm =
1482 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1483 Inst.addOperand(MCOperand::createImm(Imm));
1484 }
1485
1486 void addExtendOperands(MCInst &Inst, unsigned N) const {
1487 assert(N == 1 && "Invalid number of operands!");
1488 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1489 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1490 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1491 Inst.addOperand(MCOperand::createImm(Imm));
1492 }
1493
1494 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1495 assert(N == 1 && "Invalid number of operands!");
1496 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1497 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1498 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1499 Inst.addOperand(MCOperand::createImm(Imm));
1500 }
1501
1502 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1503 assert(N == 2 && "Invalid number of operands!");
1504 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1505 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1506 Inst.addOperand(MCOperand::createImm(IsSigned));
1507 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1508 }
1509
1510 // For 8-bit load/store instructions with a register offset, both the
1511 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1512 // they're disambiguated by whether the shift was explicit or implicit rather
1513 // than its size.
1514 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1515 assert(N == 2 && "Invalid number of operands!");
1516 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1517 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1518 Inst.addOperand(MCOperand::createImm(IsSigned));
1519 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1520 }
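For illustration only (assumed example syntax; not part of the analyzed source): an 8-bit load written "ldrb w0, [x1, x2]" has an implicit shift, while "ldrb w0, [x1, x2, lsl #0]" makes the shift explicit. Both amounts are 0, so the second immediate emitted above is the only thing separating the NoShift and DoShift encodings:

// implicit shift:   addMemExtend8Operands appends createImm(IsSigned), createImm(false)
// explicit lsl #0:  addMemExtend8Operands appends createImm(IsSigned), createImm(true)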
1521
1522 template<int Shift>
1523 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1524 assert(N == 1 && "Invalid number of operands!");
1525
1526 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1527 uint64_t Value = CE->getValue();
1528 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1529 }
1530
1531 template<int Shift>
1532 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1533 assert(N == 1 && "Invalid number of operands!");
1534
1535 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1536 uint64_t Value = CE->getValue();
1537 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1538 }
1539
1540 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1541 assert(N == 1 && "Invalid number of operands!");
1542 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1543 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1544 }
1545
1546 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1547 assert(N == 1 && "Invalid number of operands!");
1548 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1549 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1550 }
1551
1552 void print(raw_ostream &OS) const override;
1553
1554 static std::unique_ptr<AArch64Operand>
1555 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1556 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1557 Op->Tok.Data = Str.data();
1558 Op->Tok.Length = Str.size();
1559 Op->Tok.IsSuffix = IsSuffix;
1560 Op->StartLoc = S;
1561 Op->EndLoc = S;
1562 return Op;
1563 }
1564
1565 static std::unique_ptr<AArch64Operand>
1566 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx) {
1567 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1568 Op->Reg.RegNum = RegNum;
1569 Op->Reg.Kind = Kind;
1570 Op->StartLoc = S;
1571 Op->EndLoc = E;
1572 return Op;
1573 }
1574
1575 static std::unique_ptr<AArch64Operand>
1576 CreateReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1577 SMLoc S, SMLoc E, MCContext &Ctx) {
1578 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1579 Op->Reg.RegNum = RegNum;
1580 Op->Reg.ElementWidth = ElementWidth;
1581 Op->Reg.Kind = Kind;
1582 Op->StartLoc = S;
1583 Op->EndLoc = E;
1584 return Op;
1585 }
1586
1587 static std::unique_ptr<AArch64Operand>
1588 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1589 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1590 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1591 Op->VectorList.RegNum = RegNum;
1592 Op->VectorList.Count = Count;
1593 Op->VectorList.NumElements = NumElements;
1594 Op->VectorList.ElementKind = ElementKind;
1595 Op->StartLoc = S;
1596 Op->EndLoc = E;
1597 return Op;
1598 }
1599
1600 static std::unique_ptr<AArch64Operand>
1601 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1602 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1603 Op->VectorIndex.Val = Idx;
1604 Op->StartLoc = S;
1605 Op->EndLoc = E;
1606 return Op;
1607 }
1608
1609 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1610 SMLoc E, MCContext &Ctx) {
1611 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1612 Op->Imm.Val = Val;
1613 Op->StartLoc = S;
1614 Op->EndLoc = E;
1615 return Op;
1616 }
1617
1618 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1619 unsigned ShiftAmount,
1620 SMLoc S, SMLoc E,
1621 MCContext &Ctx) {
1622 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1623 Op->ShiftedImm.Val = Val;
1624 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1625 Op->StartLoc = S;
1626 Op->EndLoc = E;
1627 return Op;
1628 }
1629
1630 static std::unique_ptr<AArch64Operand>
1631 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1632 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1633 Op->CondCode.Code = Code;
1634 Op->StartLoc = S;
1635 Op->EndLoc = E;
1636 return Op;
1637 }
1638
1639 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1640 MCContext &Ctx) {
1641 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1642 Op->FPImm.Val = Val;
1643 Op->StartLoc = S;
1644 Op->EndLoc = S;
1645 return Op;
1646 }
1647
1648 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1649 StringRef Str,
1650 SMLoc S,
1651 MCContext &Ctx) {
1652 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1653 Op->Barrier.Val = Val;
1654 Op->Barrier.Data = Str.data();
1655 Op->Barrier.Length = Str.size();
1656 Op->StartLoc = S;
1657 Op->EndLoc = S;
1658 return Op;
1659 }
1660
1661 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1662 uint32_t MRSReg,
1663 uint32_t MSRReg,
1664 uint32_t PStateField,
1665 MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1667 Op->SysReg.Data = Str.data();
1668 Op->SysReg.Length = Str.size();
1669 Op->SysReg.MRSReg = MRSReg;
1670 Op->SysReg.MSRReg = MSRReg;
1671 Op->SysReg.PStateField = PStateField;
1672 Op->StartLoc = S;
1673 Op->EndLoc = S;
1674 return Op;
1675 }
1676
1677 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1678 SMLoc E, MCContext &Ctx) {
1679 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1680 Op->SysCRImm.Val = Val;
1681 Op->StartLoc = S;
1682 Op->EndLoc = E;
1683 return Op;
1684 }
1685
1686 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1687 StringRef Str,
1688 SMLoc S,
1689 MCContext &Ctx) {
1690 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1691 Op->Prefetch.Val = Val;
1692 Op->Barrier.Data = Str.data();
1693 Op->Barrier.Length = Str.size();
1694 Op->StartLoc = S;
1695 Op->EndLoc = S;
1696 return Op;
1697 }
1698
1699 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1700 StringRef Str,
1701 SMLoc S,
1702 MCContext &Ctx) {
1703 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1704 Op->PSBHint.Val = Val;
1705 Op->PSBHint.Data = Str.data();
1706 Op->PSBHint.Length = Str.size();
1707 Op->StartLoc = S;
1708 Op->EndLoc = S;
1709 return Op;
1710 }
1711
1712 static std::unique_ptr<AArch64Operand>
1713 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1714 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1715 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1716 Op->ShiftExtend.Type = ShOp;
1717 Op->ShiftExtend.Amount = Val;
1718 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1719 Op->StartLoc = S;
1720 Op->EndLoc = E;
1721 return Op;
1722 }
1723};
1724
1725} // end anonymous namespace.
1726
1727void AArch64Operand::print(raw_ostream &OS) const {
1728 switch (Kind) {
1729 case k_FPImm:
1730 OS << "<fpimm " << getFPImm() << "("
1731 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1732 break;
1733 case k_Barrier: {
1734 StringRef Name = getBarrierName();
1735 if (!Name.empty())
1736 OS << "<barrier " << Name << ">";
1737 else
1738 OS << "<barrier invalid #" << getBarrier() << ">";
1739 break;
1740 }
1741 case k_Immediate:
1742 OS << *getImm();
1743 break;
1744 case k_ShiftedImm: {
1745 unsigned Shift = getShiftedImmShift();
1746 OS << "<shiftedimm ";
1747 OS << *getShiftedImmVal();
1748 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1749 break;
1750 }
1751 case k_CondCode:
1752 OS << "<condcode " << getCondCode() << ">";
1753 break;
1754 case k_Register:
1755 OS << "<register " << getReg() << ">";
1756 break;
1757 case k_VectorList: {
1758 OS << "<vectorlist ";
1759 unsigned Reg = getVectorListStart();
1760 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1761 OS << Reg + i << " ";
1762 OS << ">";
1763 break;
1764 }
1765 case k_VectorIndex:
1766 OS << "<vectorindex " << getVectorIndex() << ">";
1767 break;
1768 case k_SysReg:
1769 OS << "<sysreg: " << getSysReg() << '>';
1770 break;
1771 case k_Token:
1772 OS << "'" << getToken() << "'";
1773 break;
1774 case k_SysCR:
1775 OS << "c" << getSysCR();
1776 break;
1777 case k_Prefetch: {
1778 StringRef Name = getPrefetchName();
1779 if (!Name.empty())
1780 OS << "<prfop " << Name << ">";
1781 else
1782 OS << "<prfop invalid #" << getPrefetch() << ">";
1783 break;
1784 }
1785 case k_PSBHint:
1786 OS << getPSBHintName();
1787 break;
1788 case k_ShiftExtend:
1789 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1790 << getShiftExtendAmount();
1791 if (!hasShiftExtendAmount())
1792 OS << "<imp>";
1793 OS << '>';
1794 break;
1795 }
1796}
1797
1798/// @name Auto-generated Match Functions
1799/// {
1800
1801static unsigned MatchRegisterName(StringRef Name);
1802
1803/// }
1804
1805static unsigned MatchNeonVectorRegName(StringRef Name) {
1806 return StringSwitch<unsigned>(Name.lower())
1807 .Case("v0", AArch64::Q0)
1808 .Case("v1", AArch64::Q1)
1809 .Case("v2", AArch64::Q2)
1810 .Case("v3", AArch64::Q3)
1811 .Case("v4", AArch64::Q4)
1812 .Case("v5", AArch64::Q5)
1813 .Case("v6", AArch64::Q6)
1814 .Case("v7", AArch64::Q7)
1815 .Case("v8", AArch64::Q8)
1816 .Case("v9", AArch64::Q9)
1817 .Case("v10", AArch64::Q10)
1818 .Case("v11", AArch64::Q11)
1819 .Case("v12", AArch64::Q12)
1820 .Case("v13", AArch64::Q13)
1821 .Case("v14", AArch64::Q14)
1822 .Case("v15", AArch64::Q15)
1823 .Case("v16", AArch64::Q16)
1824 .Case("v17", AArch64::Q17)
1825 .Case("v18", AArch64::Q18)
1826 .Case("v19", AArch64::Q19)
1827 .Case("v20", AArch64::Q20)
1828 .Case("v21", AArch64::Q21)
1829 .Case("v22", AArch64::Q22)
1830 .Case("v23", AArch64::Q23)
1831 .Case("v24", AArch64::Q24)
1832 .Case("v25", AArch64::Q25)
1833 .Case("v26", AArch64::Q26)
1834 .Case("v27", AArch64::Q27)
1835 .Case("v28", AArch64::Q28)
1836 .Case("v29", AArch64::Q29)
1837 .Case("v30", AArch64::Q30)
1838 .Case("v31", AArch64::Q31)
1839 .Default(0);
1840}
1841
1842static bool isValidVectorKind(StringRef Name) {
1843 return StringSwitch<bool>(Name.lower())
1844 .Case(".8b", true)
1845 .Case(".16b", true)
1846 .Case(".4h", true)
1847 .Case(".8h", true)
1848 .Case(".2s", true)
1849 .Case(".4s", true)
1850 .Case(".1d", true)
1851 .Case(".2d", true)
1852 .Case(".1q", true)
1853 // Accept the width neutral ones, too, for verbose syntax. If those
1854 // aren't used in the right places, the token operand won't match so
1855 // all will work out.
1856 .Case(".b", true)
1857 .Case(".h", true)
1858 .Case(".s", true)
1859 .Case(".d", true)
1860 // Needed for fp16 scalar pairwise reductions
1861 .Case(".2h", true)
1862 // another special case for the ARMv8.2a dot product operand
1863 .Case(".4b", true)
1864 .Default(false);
1865}
1866
1867static unsigned matchSVEDataVectorRegName(StringRef Name) {
1868 return StringSwitch<unsigned>(Name.lower())
1869 .Case("z0", AArch64::Z0)
1870 .Case("z1", AArch64::Z1)
1871 .Case("z2", AArch64::Z2)
1872 .Case("z3", AArch64::Z3)
1873 .Case("z4", AArch64::Z4)
1874 .Case("z5", AArch64::Z5)
1875 .Case("z6", AArch64::Z6)
1876 .Case("z7", AArch64::Z7)
1877 .Case("z8", AArch64::Z8)
1878 .Case("z9", AArch64::Z9)
1879 .Case("z10", AArch64::Z10)
1880 .Case("z11", AArch64::Z11)
1881 .Case("z12", AArch64::Z12)
1882 .Case("z13", AArch64::Z13)
1883 .Case("z14", AArch64::Z14)
1884 .Case("z15", AArch64::Z15)
1885 .Case("z16", AArch64::Z16)
1886 .Case("z17", AArch64::Z17)
1887 .Case("z18", AArch64::Z18)
1888 .Case("z19", AArch64::Z19)
1889 .Case("z20", AArch64::Z20)
1890 .Case("z21", AArch64::Z21)
1891 .Case("z22", AArch64::Z22)
1892 .Case("z23", AArch64::Z23)
1893 .Case("z24", AArch64::Z24)
1894 .Case("z25", AArch64::Z25)
1895 .Case("z26", AArch64::Z26)
1896 .Case("z27", AArch64::Z27)
1897 .Case("z28", AArch64::Z28)
1898 .Case("z29", AArch64::Z29)
1899 .Case("z30", AArch64::Z30)
1900 .Case("z31", AArch64::Z31)
1901 .Default(0);
1902}
1903
1904static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
1905 return StringSwitch<unsigned>(Name.lower())
1906 .Case("p0", AArch64::P0)
1907 .Case("p1", AArch64::P1)
1908 .Case("p2", AArch64::P2)
1909 .Case("p3", AArch64::P3)
1910 .Case("p4", AArch64::P4)
1911 .Case("p5", AArch64::P5)
1912 .Case("p6", AArch64::P6)
1913 .Case("p7", AArch64::P7)
1914 .Case("p8", AArch64::P8)
1915 .Case("p9", AArch64::P9)
1916 .Case("p10", AArch64::P10)
1917 .Case("p11", AArch64::P11)
1918 .Case("p12", AArch64::P12)
1919 .Case("p13", AArch64::P13)
1920 .Case("p14", AArch64::P14)
1921 .Case("p15", AArch64::P15)
1922 .Default(0);
1923}
1924
1925static bool isValidSVEKind(StringRef Name) {
1926 return StringSwitch<bool>(Name.lower())
1927 .Case(".b", true)
1928 .Case(".h", true)
1929 .Case(".s", true)
1930 .Case(".d", true)
1931 .Case(".q", true)
1932 .Default(false);
1933}
1934
1935static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1936 char &ElementKind) {
1937 assert(isValidVectorKind(Name));
1938
1939 ElementKind = Name.lower()[Name.size() - 1];
1940 NumElements = 0;
1941
1942 if (Name.size() == 2)
1943 return;
1944
1945 // Parse the lane count
1946 Name = Name.drop_front();
1947 while (isdigit(Name.front())) {
1948 NumElements = 10 * NumElements + (Name.front() - '0');
1949 Name = Name.drop_front();
1950 }
1951}
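A minimal usage sketch (assuming only the signature shown above; not part of the analyzed source), showing what the helper yields for two representative suffixes:

unsigned NumElements; char ElementKind;
parseValidVectorKind(".4s", NumElements, ElementKind); // NumElements == 4, ElementKind == 's'
parseValidVectorKind(".b", NumElements, ElementKind);  // NumElements == 0 (width-neutral), ElementKind == 'b'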
1952
1953bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1954 SMLoc &EndLoc) {
1955 StartLoc = getLoc();
1956 RegNo = tryParseRegister();
1957 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1958 return (RegNo == (unsigned)-1);
1959}
1960
1961// Matches a register name or register alias previously defined by '.req'
1962unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1963 RegKind Kind) {
1964 unsigned RegNum = 0;
1965 if ((RegNum = matchSVEDataVectorRegName(Name)))
1966 return Kind == RegKind::SVEDataVector ? RegNum : 0;
1967
1968 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
1969 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
1970
1971 if ((RegNum = MatchNeonVectorRegName(Name)))
1972 return Kind == RegKind::NeonVector ? RegNum : 0;
1973
1974 // The parsed register must be of RegKind Scalar
1975 if ((RegNum = MatchRegisterName(Name)))
1976 return Kind == RegKind::Scalar ? RegNum : 0;
1977
1978 if (!RegNum) {
1979 // Check for aliases registered via .req. Canonicalize to lower case.
1980 // That's more consistent since register names are case insensitive, and
1981 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1982 auto Entry = RegisterReqs.find(Name.lower());
1983 if (Entry == RegisterReqs.end())
1984 return 0;
1985
1986 // set RegNum if the match is the right kind of register
1987 if (Kind == Entry->getValue().first)
1988 RegNum = Entry->getValue().second;
1989 }
1990 return RegNum;
1991}
1992
1993/// tryParseRegister - Try to parse a register name. The token must be an
1994/// Identifier when called, and if it is a register name the token is eaten and
1995/// the register is added to the operand list.
1996int AArch64AsmParser::tryParseRegister() {
1997 MCAsmParser &Parser = getParser();
1998 const AsmToken &Tok = Parser.getTok();
1999 if (Tok.isNot(AsmToken::Identifier))
2000 return -1;
2001
2002 std::string lowerCase = Tok.getString().lower();
2003 unsigned RegNum = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2004
2005 // Also handle a few aliases of registers.
2006 if (RegNum == 0)
2007 RegNum = StringSwitch<unsigned>(lowerCase)
2008 .Case("fp", AArch64::FP)
2009 .Case("lr", AArch64::LR)
2010 .Case("x31", AArch64::XZR)
2011 .Case("w31", AArch64::WZR)
2012 .Default(0);
2013
2014 if (RegNum == 0)
2015 return -1;
2016
2017 Parser.Lex(); // Eat identifier token.
2018 return RegNum;
2019}
2020
2021/// tryMatchVectorRegister - Try to parse a vector register name with optional
2022/// kind specifier. If it is a register specifier, eat the token and return it.
2023int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
2024 MCAsmParser &Parser = getParser();
2025 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2026 TokError("vector register expected");
2027 return -1;
2028 }
2029
2030 StringRef Name = Parser.getTok().getString();
2031 // If there is a kind specifier, it's separated from the register name by
2032 // a '.'.
2033 size_t Start = 0, Next = Name.find('.');
2034 StringRef Head = Name.slice(Start, Next);
2035 unsigned RegNum = matchRegisterNameAlias(Head, RegKind::NeonVector);
2036
2037 if (RegNum) {
2038 if (Next != StringRef::npos) {
2039 Kind = Name.slice(Next, StringRef::npos);
2040 if (!isValidVectorKind(Kind)) {
2041 TokError("invalid vector kind qualifier");
2042 return -1;
2043 }
2044 }
2045 Parser.Lex(); // Eat the register token.
2046 return RegNum;
2047 }
2048
2049 if (expected)
2050 TokError("vector register expected");
2051 return -1;
2052}
2053
2054/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2055OperandMatchResultTy
2056AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2057 MCAsmParser &Parser = getParser();
2058 SMLoc S = getLoc();
2059
2060 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2061 Error(S, "Expected cN operand where 0 <= N <= 15");
2062 return MatchOperand_ParseFail;
2063 }
2064
2065 StringRef Tok = Parser.getTok().getIdentifier();
2066 if (Tok[0] != 'c' && Tok[0] != 'C') {
2067 Error(S, "Expected cN operand where 0 <= N <= 15");
2068 return MatchOperand_ParseFail;
2069 }
2070
2071 uint32_t CRNum;
2072 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2073 if (BadNum || CRNum > 15) {
2074 Error(S, "Expected cN operand where 0 <= N <= 15");
2075 return MatchOperand_ParseFail;
2076 }
2077
2078 Parser.Lex(); // Eat identifier token.
2079 Operands.push_back(
2080 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2081 return MatchOperand_Success;
2082}
2083
2084/// tryParsePrefetch - Try to parse a prefetch operand.
2085OperandMatchResultTy
2086AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2087 MCAsmParser &Parser = getParser();
2088 SMLoc S = getLoc();
2089 const AsmToken &Tok = Parser.getTok();
2090 // Either an identifier for named values or a 5-bit immediate.
2091 // Eat optional hash.
2092 if (parseOptionalToken(AsmToken::Hash) ||
2093 Tok.is(AsmToken::Integer)) {
2094 const MCExpr *ImmVal;
2095 if (getParser().parseExpression(ImmVal))
2096 return MatchOperand_ParseFail;
2097
2098 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2099 if (!MCE) {
2100 TokError("immediate value expected for prefetch operand");
2101 return MatchOperand_ParseFail;
2102 }
2103 unsigned prfop = MCE->getValue();
2104 if (prfop > 31) {
2105 TokError("prefetch operand out of range, [0,31] expected");
2106 return MatchOperand_ParseFail;
2107 }
2108
2109 auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
2110 Operands.push_back(AArch64Operand::CreatePrefetch(
2111 prfop, PRFM ? PRFM->Name : "", S, getContext()));
2112 return MatchOperand_Success;
2113 }
2114
2115 if (Tok.isNot(AsmToken::Identifier)) {
2116 TokError("pre-fetch hint expected");
2117 return MatchOperand_ParseFail;
2118 }
2119
2120 auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
2121 if (!PRFM) {
2122 TokError("pre-fetch hint expected");
2123 return MatchOperand_ParseFail;
2124 }
2125
2126 Parser.Lex(); // Eat identifier token.
2127 Operands.push_back(AArch64Operand::CreatePrefetch(
2128 PRFM->Encoding, Tok.getString(), S, getContext()));
2129 return MatchOperand_Success;
2130}
2131
2132/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2133OperandMatchResultTy
2134AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2135 MCAsmParser &Parser = getParser();
2136 SMLoc S = getLoc();
2137 const AsmToken &Tok = Parser.getTok();
2138 if (Tok.isNot(AsmToken::Identifier)) {
2139 TokError("invalid operand for instruction");
2140 return MatchOperand_ParseFail;
2141 }
2142
2143 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2144 if (!PSB) {
2145 TokError("invalid operand for instruction");
2146 return MatchOperand_ParseFail;
2147 }
2148
2149 Parser.Lex(); // Eat identifier token.
2150 Operands.push_back(AArch64Operand::CreatePSBHint(
2151 PSB->Encoding, Tok.getString(), S, getContext()));
2152 return MatchOperand_Success;
2153}
2154
2155/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2156/// instruction.
2157OperandMatchResultTy
2158AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2159 MCAsmParser &Parser = getParser();
2160 SMLoc S = getLoc();
2161 const MCExpr *Expr;
2162
2163 if (Parser.getTok().is(AsmToken::Hash)) {
2164 Parser.Lex(); // Eat hash token.
2165 }
2166
2167 if (parseSymbolicImmVal(Expr))
2168 return MatchOperand_ParseFail;
2169
2170 AArch64MCExpr::VariantKind ELFRefKind;
2171 MCSymbolRefExpr::VariantKind DarwinRefKind;
2172 int64_t Addend;
2173 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2174 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2175 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2176 // No modifier was specified at all; this is the syntax for an ELF basic
2177 // ADRP relocation (unfortunately).
2178 Expr =
2179 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2180 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2181 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2182 Addend != 0) {
2183 Error(S, "gotpage label reference not allowed an addend");
2184 return MatchOperand_ParseFail;
2185 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2186 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2187 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2188 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2189 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2190 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2191 // The operand must be an @page or @gotpage qualified symbolref.
2192 Error(S, "page or gotpage label reference expected");
2193 return MatchOperand_ParseFail;
2194 }
2195 }
2196
2197 // We have either a label reference possibly with addend or an immediate. The
2198 // addend is a raw value here. The linker will adjust it to only reference the
2199 // page.
2200 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2201 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2202
2203 return MatchOperand_Success;
2204}
2205
2206/// tryParseAdrLabel - Parse and validate a source label for the ADR
2207/// instruction.
2208OperandMatchResultTy
2209AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2210 SMLoc S = getLoc();
2211 const MCExpr *Expr;
2212
2213 parseOptionalToken(AsmToken::Hash);
2214 if (getParser().parseExpression(Expr))
2215 return MatchOperand_ParseFail;
2216
2217 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2218 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2219
2220 return MatchOperand_Success;
2221}
2222
2223/// tryParseFPImm - A floating point immediate expression operand.
2224OperandMatchResultTy
2225AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2226 MCAsmParser &Parser = getParser();
2227 SMLoc S = getLoc();
2228
2229 bool Hash = parseOptionalToken(AsmToken::Hash);
2230
2231 // Handle negation, as that still comes through as a separate token.
2232 bool isNegative = parseOptionalToken(AsmToken::Minus);
2233
2234 const AsmToken &Tok = Parser.getTok();
2235 if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) {
2236 int64_t Val;
2237 if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) {
2238 Val = Tok.getIntVal();
2239 if (Val > 255 || Val < 0) {
2240 TokError("encoded floating point value out of range");
2241 return MatchOperand_ParseFail;
2242 }
2243 } else {
2244 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2245 if (isNegative)
2246 RealVal.changeSign();
2247
2248 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2249 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2250
2251 // Check for out of range values. As an exception we let Zero through,
2252 // but as tokens instead of an FPImm so that it can be matched by the
2253 // appropriate alias if one exists.
2254 if (RealVal.isPosZero()) {
2255 Parser.Lex(); // Eat the token.
2256 Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext()));
2257 Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext()));
2258 return MatchOperand_Success;
2259 } else if (Val == -1) {
2260 TokError("expected compatible register or floating-point constant");
2261 return MatchOperand_ParseFail;
2262 }
2263 }
2264 Parser.Lex(); // Eat the token.
2265 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2266 return MatchOperand_Success;
2267 }
2268
2269 if (!Hash)
2270 return MatchOperand_NoMatch;
2271
2272 TokError("invalid floating point immediate");
2273 return MatchOperand_ParseFail;
2274}
2275
2276/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2277OperandMatchResultTy
2278AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2279 MCAsmParser &Parser = getParser();
2280 SMLoc S = getLoc();
2281
2282 if (Parser.getTok().is(AsmToken::Hash))
[1] Taking true branch
2283 Parser.Lex(); // Eat '#'
2284 else if (Parser.getTok().isNot(AsmToken::Integer))
2285 // Operand should start from # or should be integer, emit error otherwise.
2286 return MatchOperand_NoMatch;
2287
2288 const MCExpr *Imm;
[2] 'Imm' declared without an initial value
2289 if (parseSymbolicImmVal(Imm))
[3] Calling 'AArch64AsmParser::parseSymbolicImmVal'
[8] Returning from 'AArch64AsmParser::parseSymbolicImmVal'
[9] Assuming the condition is false
[10] Taking false branch
2290 return MatchOperand_ParseFail;
2291 else if (Parser.getTok().isNot(AsmToken::Comma)) {
[11] Taking false branch
2292 uint64_t ShiftAmount = 0;
2293 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2294 if (MCE) {
2295 int64_t Val = MCE->getValue();
2296 if (Val > 0xfff && (Val & 0xfff) == 0) {
2297 Imm = MCConstantExpr::create(Val >> 12, getContext());
2298 ShiftAmount = 12;
2299 }
2300 }
2301 SMLoc E = Parser.getTok().getLoc();
2302 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2303 getContext()));
2304 return MatchOperand_Success;
2305 }
2306
2307 // Eat ','
2308 Parser.Lex();
2309
2310 // The optional operand must be "lsl #N" where N is non-negative.
2311 if (!Parser.getTok().is(AsmToken::Identifier) ||
[13] Taking false branch
2312 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
[12] Assuming the condition is false
2313 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2314 return MatchOperand_ParseFail;
2315 }
2316
2317 // Eat 'lsl'
2318 Parser.Lex();
2319
2320 parseOptionalToken(AsmToken::Hash);
2321
2322 if (Parser.getTok().isNot(AsmToken::Integer)) {
[14] Taking false branch
2323 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2324 return MatchOperand_ParseFail;
2325 }
2326
2327 int64_t ShiftAmount = Parser.getTok().getIntVal();
2328
2329 if (ShiftAmount < 0) {
[15] Assuming 'ShiftAmount' is >= 0
[16] Taking false branch
2330 Error(Parser.getTok().getLoc(), "positive shift amount required");
2331 return MatchOperand_ParseFail;
2332 }
2333 Parser.Lex(); // Eat the number
2334
2335 SMLoc E = Parser.getTok().getLoc();
2336 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
[17] 1st function call argument is an uninitialized value
2337 S, E, getContext()));
2338 return MatchOperand_Success;
2339}
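On the path the analyzer reports, parseSymbolicImmVal returns false (steps 3-8) without ever assigning 'Imm', so line 2336 hands an indeterminate pointer to CreateShiftedImm. A minimal defensive sketch (an assumed fix for illustration, not the shipped code) initializes the pointer and treats an empty result as a parse failure:

const MCExpr *Imm = nullptr;           // never leave the pointer indeterminate
if (parseSymbolicImmVal(Imm) || !Imm)  // a null expression is also a parse failure
  return MatchOperand_ParseFail;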
2340
2341/// parseCondCodeString - Parse a Condition Code string.
2342AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2343 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2344 .Case("eq", AArch64CC::EQ)
2345 .Case("ne", AArch64CC::NE)
2346 .Case("cs", AArch64CC::HS)
2347 .Case("hs", AArch64CC::HS)
2348 .Case("cc", AArch64CC::LO)
2349 .Case("lo", AArch64CC::LO)
2350 .Case("mi", AArch64CC::MI)
2351 .Case("pl", AArch64CC::PL)
2352 .Case("vs", AArch64CC::VS)
2353 .Case("vc", AArch64CC::VC)
2354 .Case("hi", AArch64CC::HI)
2355 .Case("ls", AArch64CC::LS)
2356 .Case("ge", AArch64CC::GE)
2357 .Case("lt", AArch64CC::LT)
2358 .Case("gt", AArch64CC::GT)
2359 .Case("le", AArch64CC::LE)
2360 .Case("al", AArch64CC::AL)
2361 .Case("nv", AArch64CC::NV)
2362 .Default(AArch64CC::Invalid);
2363 return CC;
2364}
2365
2366/// parseCondCode - Parse a Condition Code operand.
2367bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2368 bool invertCondCode) {
2369 MCAsmParser &Parser = getParser();
2370 SMLoc S = getLoc();
2371 const AsmToken &Tok = Parser.getTok();
2372 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2373
2374 StringRef Cond = Tok.getString();
2375 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2376 if (CC == AArch64CC::Invalid)
2377 return TokError("invalid condition code");
2378 Parser.Lex(); // Eat identifier token.
2379
2380 if (invertCondCode) {
2381 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2382 return TokError("condition codes AL and NV are invalid for this instruction");
2383 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2384 }
2385
2386 Operands.push_back(
2387 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2388 return false;
2389}
2390
2391/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2392/// them if present.
2393OperandMatchResultTy
2394AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2395 MCAsmParser &Parser = getParser();
2396 const AsmToken &Tok = Parser.getTok();
2397 std::string LowerID = Tok.getString().lower();
2398 AArch64_AM::ShiftExtendType ShOp =
2399 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2400 .Case("lsl", AArch64_AM::LSL)
2401 .Case("lsr", AArch64_AM::LSR)
2402 .Case("asr", AArch64_AM::ASR)
2403 .Case("ror", AArch64_AM::ROR)
2404 .Case("msl", AArch64_AM::MSL)
2405 .Case("uxtb", AArch64_AM::UXTB)
2406 .Case("uxth", AArch64_AM::UXTH)
2407 .Case("uxtw", AArch64_AM::UXTW)
2408 .Case("uxtx", AArch64_AM::UXTX)
2409 .Case("sxtb", AArch64_AM::SXTB)
2410 .Case("sxth", AArch64_AM::SXTH)
2411 .Case("sxtw", AArch64_AM::SXTW)
2412 .Case("sxtx", AArch64_AM::SXTX)
2413 .Default(AArch64_AM::InvalidShiftExtend);
2414
2415 if (ShOp == AArch64_AM::InvalidShiftExtend)
2416 return MatchOperand_NoMatch;
2417
2418 SMLoc S = Tok.getLoc();
2419 Parser.Lex();
2420
2421 bool Hash = parseOptionalToken(AsmToken::Hash);
2422
2423 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2424 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2425 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2426 ShOp == AArch64_AM::MSL) {
2427 // We expect a number here.
2428 TokError("expected #imm after shift specifier");
2429 return MatchOperand_ParseFail;
2430 }
2431
2432 // "extend" type operations don't need an immediate, #0 is implicit.
2433 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2434 Operands.push_back(
2435 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2436 return MatchOperand_Success;
2437 }
2438
2439 // Make sure we do actually have a number, identifier or a parenthesized
2440 // expression.
2441 SMLoc E = Parser.getTok().getLoc();
2442 if (!Parser.getTok().is(AsmToken::Integer) &&
2443 !Parser.getTok().is(AsmToken::LParen) &&
2444 !Parser.getTok().is(AsmToken::Identifier)) {
2445 Error(E, "expected integer shift amount");
2446 return MatchOperand_ParseFail;
2447 }
2448
2449 const MCExpr *ImmVal;
2450 if (getParser().parseExpression(ImmVal))
2451 return MatchOperand_ParseFail;
2452
2453 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2454 if (!MCE) {
2455 Error(E, "expected constant '#imm' after shift specifier");
2456 return MatchOperand_ParseFail;
2457 }
2458
2459 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2460 Operands.push_back(AArch64Operand::CreateShiftExtend(
2461 ShOp, MCE->getValue(), true, S, E, getContext()));
2462 return MatchOperand_Success;
2463}
2464
2465static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2466 if (FBS[AArch64::HasV8_1aOps])
2467 Str += "ARMv8.1a";
2468 else if (FBS[AArch64::HasV8_2aOps])
2469 Str += "ARMv8.2a";
2470 else
2471 Str += "(unknown)";
2472}
2473
2474void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2475 SMLoc S) {
2476 const uint16_t Op2 = Encoding & 7;
2477 const uint16_t Cm = (Encoding & 0x78) >> 3;
2478 const uint16_t Cn = (Encoding & 0x780) >> 7;
2479 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2480
2481 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2482
2483 Operands.push_back(
2484 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2485 Operands.push_back(
2486 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2487 Operands.push_back(
2488 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2489 Expr = MCConstantExpr::create(Op2, getContext());
2490 Operands.push_back(
2491 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2492}
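
createSysAlias unpacks a 14-bit op1:Cn:Cm:op2 encoding (3 + 4 + 4 + 3 bits) into the four explicit SYS operands. A minimal standalone check of that bit layout, using an encoding value chosen purely to exercise the fields (not taken from the annotated file):

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // Pack op1=3, Cn=7, Cm=5, op2=1 (illustrative values only).
  const uint16_t Encoding = (3u << 11) | (7u << 7) | (5u << 3) | 1u;

  // Same masks and shifts as createSysAlias above.
  const uint16_t Op2 = Encoding & 7;
  const uint16_t Cm = (Encoding & 0x78) >> 3;
  const uint16_t Cn = (Encoding & 0x780) >> 7;
  const uint16_t Op1 = (Encoding & 0x3800) >> 11;

  assert(Op1 == 3 && Cn == 7 && Cm == 5 && Op2 == 1);
  std::printf("sys #%u, C%u, C%u, #%u\n", Op1, Cn, Cm, Op2);
}
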
2493
2494/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2495/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2496bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2497 OperandVector &Operands) {
2498 if (Name.find('.') != StringRef::npos)
2499 return TokError("invalid operand");
2500
2501 Mnemonic = Name;
2502 Operands.push_back(
2503 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2504
2505 MCAsmParser &Parser = getParser();
2506 const AsmToken &Tok = Parser.getTok();
2507 StringRef Op = Tok.getString();
2508 SMLoc S = Tok.getLoc();
2509
2510 if (Mnemonic == "ic") {
2511 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2512 if (!IC)
2513 return TokError("invalid operand for IC instruction");
2514 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2515 std::string Str("IC " + std::string(IC->Name) + " requires ");
2516 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2517 return TokError(Str.c_str());
2518 }
2519 createSysAlias(IC->Encoding, Operands, S);
2520 } else if (Mnemonic == "dc") {
2521 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2522 if (!DC)
2523 return TokError("invalid operand for DC instruction");
2524 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2525 std::string Str("DC " + std::string(DC->Name) + " requires ");
2526 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2527 return TokError(Str.c_str());
2528 }
2529 createSysAlias(DC->Encoding, Operands, S);
2530 } else if (Mnemonic == "at") {
2531 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2532 if (!AT)
2533 return TokError("invalid operand for AT instruction");
2534 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2535 std::string Str("AT " + std::string(AT->Name) + " requires ");
2536 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2537 return TokError(Str.c_str());
2538 }
2539 createSysAlias(AT->Encoding, Operands, S);
2540 } else if (Mnemonic == "tlbi") {
2541 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2542 if (!TLBI)
2543 return TokError("invalid operand for TLBI instruction");
2544 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2545 std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2546 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2547 return TokError(Str.c_str());
2548 }
2549 createSysAlias(TLBI->Encoding, Operands, S);
2550 }
2551
2552 Parser.Lex(); // Eat operand.
2553
2554 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2555 bool HasRegister = false;
2556
2557 // Check for the optional register operand.
2558 if (parseOptionalToken(AsmToken::Comma)) {
2559 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2560 return TokError("expected register operand");
2561 HasRegister = true;
2562 }
2563
2564 if (ExpectRegister && !HasRegister)
2565 return TokError("specified " + Mnemonic + " op requires a register");
2566 else if (!ExpectRegister && HasRegister)
2567 return TokError("specified " + Mnemonic + " op does not use a register");
2568
2569 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2570 return true;
2571
2572 return false;
2573}
2574
2575OperandMatchResultTy
2576AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2577 MCAsmParser &Parser = getParser();
2578 const AsmToken &Tok = Parser.getTok();
2579
2580 // Can be either a #imm style literal or an option name
2581 if (parseOptionalToken(AsmToken::Hash) ||
2582 Tok.is(AsmToken::Integer)) {
2583 // Immediate operand.
2584 const MCExpr *ImmVal;
2585 SMLoc ExprLoc = getLoc();
2586 if (getParser().parseExpression(ImmVal))
2587 return MatchOperand_ParseFail;
2588 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2589 if (!MCE) {
2590 Error(ExprLoc, "immediate value expected for barrier operand");
2591 return MatchOperand_ParseFail;
2592 }
2593 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2594 Error(ExprLoc, "barrier operand out of range");
2595 return MatchOperand_ParseFail;
2596 }
2597 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2598 Operands.push_back(AArch64Operand::CreateBarrier(
2599 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2600 return MatchOperand_Success;
2601 }
2602
2603 if (Tok.isNot(AsmToken::Identifier)) {
2604 TokError("invalid operand for instruction");
2605 return MatchOperand_ParseFail;
2606 }
2607
2608 // The only valid named option for ISB is 'sy'
2609 auto DB = AArch64DB::lookupDBByName(Tok.getString());
2610 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2611 TokError("'sy' or #imm operand expected");
2612 return MatchOperand_ParseFail;
2613 } else if (!DB) {
2614 TokError("invalid barrier option name");
2615 return MatchOperand_ParseFail;
2616 }
2617
2618 Operands.push_back(AArch64Operand::CreateBarrier(
2619 DB->Encoding, Tok.getString(), getLoc(), getContext()));
2620 Parser.Lex(); // Consume the option
2621
2622 return MatchOperand_Success;
2623}
2624
2625OperandMatchResultTy
2626AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2627 MCAsmParser &Parser = getParser();
2628 const AsmToken &Tok = Parser.getTok();
2629
2630 if (Tok.isNot(AsmToken::Identifier))
2631 return MatchOperand_NoMatch;
2632
2633 int MRSReg, MSRReg;
2634 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2635 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2636 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2637 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2638 } else
2639 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2640
2641 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2642 unsigned PStateImm = -1;
2643 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2644 PStateImm = PState->Encoding;
2645
2646 Operands.push_back(
2647 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2648 PStateImm, getContext()));
2649 Parser.Lex(); // Eat identifier
2650
2651 return MatchOperand_Success;
2652}
2653
2654/// tryParseNeonVectorRegister - Parse a vector register operand.
2655bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2656 MCAsmParser &Parser = getParser();
2657 if (Parser.getTok().isNot(AsmToken::Identifier))
2658 return true;
2659
2660 SMLoc S = getLoc();
2661 // Check for a vector register specifier first.
2662 StringRef Kind;
2663 int64_t Reg = tryMatchVectorRegister(Kind, false);
2664 if (Reg == -1)
2665 return true;
2666 Operands.push_back(
2667 AArch64Operand::CreateReg(Reg, RegKind::NeonVector, S, getLoc(),
2668 getContext()));
2669
2670 // If there was an explicit qualifier, that goes on as a literal text
2671 // operand.
2672 if (!Kind.empty())
2673 Operands.push_back(
2674 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2675
2676 // If there is an index specifier following the register, parse that too.
2677 SMLoc SIdx = getLoc();
2678 if (parseOptionalToken(AsmToken::LBrac)) {
2679 const MCExpr *ImmVal;
2680 if (getParser().parseExpression(ImmVal))
2681 return false;
2682 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2683 if (!MCE) {
2684 TokError("immediate value expected for vector index");
2685 return false;
2686 }
2687
2688 SMLoc E = getLoc();
2689
2690 if (parseToken(AsmToken::RBrac, "']' expected"))
2691 return false;
2692
2693 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2694 E, getContext()));
2695 }
2696
2697 return false;
2698}
2699
2700// tryParseSVERegister - Try to parse an SVE vector register name with an
2701// optional kind specifier. If it is a register specifier, eat the token
2702// and return it.
2703OperandMatchResultTy
2704AArch64AsmParser::tryParseSVERegister(int &Reg, StringRef &Kind,
2705 RegKind MatchKind) {
2706 MCAsmParser &Parser = getParser();
2707 const AsmToken &Tok = Parser.getTok();
2708
2709 if (Tok.isNot(AsmToken::Identifier))
2710 return MatchOperand_NoMatch;
2711
2712 StringRef Name = Tok.getString();
2713 // If there is a kind specifier, it's separated from the register name by
2714 // a '.'.
2715 size_t Start = 0, Next = Name.find('.');
2716 StringRef Head = Name.slice(Start, Next);
2717 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
2718
2719 if (RegNum) {
2720 if (Next != StringRef::npos) {
2721 Kind = Name.slice(Next, StringRef::npos);
2722 if (!isValidSVEKind(Kind)) {
2723 TokError("invalid sve vector kind qualifier");
2724 return MatchOperand_ParseFail;
2725 }
2726 }
2727 Parser.Lex(); // Eat the register token.
2728
2729 Reg = RegNum;
2730 return MatchOperand_Success;
2731 }
2732
2733 return MatchOperand_NoMatch;
2734}
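
Splitting the identifier at the first '.' is what lets a token such as "z3.s" be treated as register head "z3" plus kind qualifier ".s". A hypothetical standalone version of that split (StringRef::slice semantics approximated with std::string; the real matchRegisterNameAlias/isValidSVEKind checks are omitted):

#include <cstdio>
#include <string>

int main() {
  const std::string Name = "z3.s";          // hypothetical SVE register token
  const std::size_t Next = Name.find('.');

  // Head: everything before the '.'; Kind: the '.' and what follows (may be empty).
  const std::string Head = Name.substr(0, Next);
  const std::string Kind =
      Next == std::string::npos ? "" : Name.substr(Next);

  std::printf("head='%s' kind='%s'\n", Head.c_str(), Kind.c_str()); // z3 / .s
}
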
2735
2736/// tryParseSVEPredicateVector - Parse an SVE predicate register operand.
2737OperandMatchResultTy
2738AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
2739 // Check for a SVE predicate register specifier first.
2740 const SMLoc S = getLoc();
2741 StringRef Kind;
2742 int RegNum = -1;
2743 auto Res = tryParseSVERegister(RegNum, Kind, RegKind::SVEPredicateVector);
2744 if (Res != MatchOperand_Success)
2745 return Res;
2746
2747 unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
2748 .Case("", -1)
2749 .Case(".b", 8)
2750 .Case(".h", 16)
2751 .Case(".s", 32)
2752 .Case(".d", 64)
2753 .Case(".q", 128)
2754 .Default(0);
2755
2756 if (!ElementWidth)
2757 return MatchOperand_NoMatch;
2758
2759 Operands.push_back(
2760 AArch64Operand::CreateReg(RegNum, RegKind::SVEPredicateVector,
2761 ElementWidth, S, getLoc(), getContext()));
2762
2763 // Not all predicates are followed by a '/m' or '/z'.
2764 MCAsmParser &Parser = getParser();
2765 if (Parser.getTok().isNot(AsmToken::Slash))
2766 return MatchOperand_Success;
2767
2768 // But when they do they shouldn't have an element type suffix.
2769 if (!Kind.empty()) {
2770 Error(S, "not expecting size suffix");
2771 return MatchOperand_ParseFail;
2772 }
2773
2774 // Add a literal slash as operand
2775 Operands.push_back(
2776 AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
2777
2778 Parser.Lex(); // Eat the slash.
2779
2780 // Zeroing or merging?
2781 auto Pred = Parser.getTok().getString().lower();
2782 if (Pred != "z" && Pred != "m") {
2783 Error(getLoc(), "expecting 'm' or 'z' predication");
2784 return MatchOperand_ParseFail;
2785 }
2786
2787 // Add zero/merge token.
2788 const char *ZM = Pred == "z" ? "z" : "m";
2789 Operands.push_back(
2790 AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
2791
2792 Parser.Lex(); // Eat zero/merge token.
2793 return MatchOperand_Success;
2794}
2795
2796/// parseRegister - Parse a non-vector register operand.
2797bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2798 SMLoc S = getLoc();
2799 // Try for a vector (neon) register.
2800 if (!tryParseNeonVectorRegister(Operands))
2801 return false;
2802
2803 // Try for a scalar register.
2804 int64_t Reg = tryParseRegister();
2805 if (Reg == -1)
2806 return true;
2807 Operands.push_back(AArch64Operand::CreateReg(Reg, RegKind::Scalar, S,
2808 getLoc(), getContext()));
2809
2810 return false;
2811}
2812
2813bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2814 MCAsmParser &Parser = getParser();
2815 bool HasELFModifier = false;
2816 AArch64MCExpr::VariantKind RefKind;
2817
2818 if (parseOptionalToken(AsmToken::Colon)) {
4. Assuming the condition is true
5. Taking true branch
2819 HasELFModifier = true;
2820
2821 if (Parser.getTok().isNot(AsmToken::Identifier))
6. Taking true branch
2822 return TokError("expect relocation specifier in operand after ':'");
7. Returning without writing to 'ImmVal'
2823
2824 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2825 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2826 .Case("lo12", AArch64MCExpr::VK_LO12)
2827 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2828 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2829 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2830 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2831 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2832 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2833 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2834 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2835 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2836 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2837 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2838 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2839 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2840 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2841 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2842 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2843 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2844 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2845 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2846 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2847 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2848 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2849 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2850 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2851 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2852 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2853 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2854 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2855 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2856 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2857 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2858 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2859 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2860 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2861 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
2862 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
2863 .Default(AArch64MCExpr::VK_INVALID);
2864
2865 if (RefKind == AArch64MCExpr::VK_INVALID)
2866 return TokError("expect relocation specifier in operand after ':'");
2867
2868 Parser.Lex(); // Eat identifier
2869
2870 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
2871 return true;
2872 }
2873
2874 if (getParser().parseExpression(ImmVal))
2875 return true;
2876
2877 if (HasELFModifier)
2878 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2879
2880 return false;
2881}
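
This function is where the analyzer path shown above matters: when a ':' has been consumed but the next token is not an identifier, the function returns true (error) without ever assigning to ImmVal (path step 7). That is harmless as long as every caller checks the return value before touching ImmVal, but a caller that declares the pointer uninitialized and then reads it on the error path reads garbage. A minimal, hypothetical illustration of the defensive pattern (stand-in names, not a patch to this file):

#include <cstdio>

struct Expr { int Value; };

// Stand-in for parseSymbolicImmVal: returns true on error and may leave
// 'Out' untouched, exactly like the early "return TokError(...)" above.
static bool parseImm(bool Fail, const Expr *&Out) {
  if (Fail)
    return true;              // error path: Out is not written
  static Expr E{42};
  Out = &E;
  return false;
}

int main() {
  const Expr *Imm = nullptr;  // initialize defensively, and...
  if (parseImm(/*Fail=*/true, Imm)) {
    std::puts("parse failed; do not touch Imm");
    return 1;                 // ...bail out before any use of Imm
  }
  std::printf("imm = %d\n", Imm->Value);
}
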
2882
2883/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2884bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2885 MCAsmParser &Parser = getParser();
2886   assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2887 SMLoc S = getLoc();
2888 Parser.Lex(); // Eat left bracket token.
2889 StringRef Kind;
2890 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2891 if (FirstReg == -1)
2892 return true;
2893 int64_t PrevReg = FirstReg;
2894 unsigned Count = 1;
2895
2896 if (parseOptionalToken(AsmToken::Minus)) {
2897 SMLoc Loc = getLoc();
2898 StringRef NextKind;
2899 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2900 if (Reg == -1)
2901 return true;
2902     // Any Kind suffixes must match on all regs in the list.
2903 if (Kind != NextKind)
2904 return Error(Loc, "mismatched register size suffix");
2905
2906 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2907
2908 if (Space == 0 || Space > 3) {
2909 return Error(Loc, "invalid number of vectors");
2910 }
2911
2912 Count += Space;
2913 }
2914 else {
2915 while (parseOptionalToken(AsmToken::Comma)) {
2916 SMLoc Loc = getLoc();
2917 StringRef NextKind;
2918 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2919 if (Reg == -1)
2920 return true;
2921 // Any Kind suffices must match on all regs in the list.
2922 if (Kind != NextKind)
2923 return Error(Loc, "mismatched register size suffix");
2924
2925       // Registers must be sequential (with wraparound at 31).
2926 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2927 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2928 return Error(Loc, "registers must be sequential");
2929
2930 PrevReg = Reg;
2931 ++Count;
2932 }
2933 }
2934
2935 if (parseToken(AsmToken::RCurly, "'}' expected"))
2936 return true;
2937
2938 if (Count > 4)
2939 return Error(S, "invalid number of vectors");
2940
2941 unsigned NumElements = 0;
2942 char ElementKind = 0;
2943 if (!Kind.empty())
2944 parseValidVectorKind(Kind, NumElements, ElementKind);
2945
2946 Operands.push_back(AArch64Operand::CreateVectorList(
2947 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2948
2949 // If there is an index specifier following the list, parse that too.
2950 SMLoc SIdx = getLoc();
2951 if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token.
2952 const MCExpr *ImmVal;
2953 if (getParser().parseExpression(ImmVal))
2954 return false;
2955 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2956 if (!MCE) {
2957 TokError("immediate value expected for vector index");
2958 return false;
2959 }
2960
2961 SMLoc E = getLoc();
2962 if (parseToken(AsmToken::RBrac, "']' expected"))
2963 return false;
2964
2965 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2966 E, getContext()));
2967 }
2968 return false;
2969}
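
The comma-separated form of a vector list requires consecutive registers modulo 32, which is what the "(encoding(PrevReg) + 1) % 32" comparison enforces; the dash form computes the list length with the same wraparound. A tiny standalone sketch of the wraparound rule, not part of the annotated file (register encodings reduced to plain 0-31 integers):

#include <cstdio>

// True if 'Reg' is the register that must follow 'Prev' in a comma-separated
// vector list, allowing v31 -> v0 wraparound as in parseVectorList above.
static bool isSequential(unsigned Prev, unsigned Reg) {
  return Reg == (Prev + 1) % 32;
}

int main() {
  std::printf("%d\n", isSequential(30, 31)); // 1: { v30.4s, v31.4s }
  std::printf("%d\n", isSequential(31, 0));  // 1: wraps around to v0
  std::printf("%d\n", isSequential(5, 7));   // 0: rejected as non-sequential
}
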
2970
2971OperandMatchResultTy
2972AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2973 MCAsmParser &Parser = getParser();
2974 const AsmToken &Tok = Parser.getTok();
2975 if (!Tok.is(AsmToken::Identifier))
2976 return MatchOperand_NoMatch;
2977
2978 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), RegKind::Scalar);
2979
2980 MCContext &Ctx = getContext();
2981 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2982 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2983 return MatchOperand_NoMatch;
2984
2985 SMLoc S = getLoc();
2986 Parser.Lex(); // Eat register
2987
2988 if (!parseOptionalToken(AsmToken::Comma)) {
2989 Operands.push_back(
2990 AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
2991 return MatchOperand_Success;
2992 }
2993
2994 parseOptionalToken(AsmToken::Hash);
2995
2996 if (Parser.getTok().isNot(AsmToken::Integer)) {
2997 Error(getLoc(), "index must be absent or #0");
2998 return MatchOperand_ParseFail;
2999 }
3000
3001 const MCExpr *ImmVal;
3002 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3003 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3004 Error(getLoc(), "index must be absent or #0");
3005 return MatchOperand_ParseFail;
3006 }
3007
3008 Operands.push_back(
3009 AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
3010 return MatchOperand_Success;
3011}
3012
3013/// parseOperand - Parse an AArch64 instruction operand. For now this parses
3014/// the operand regardless of the mnemonic.
3015bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3016 bool invertCondCode) {
3017 MCAsmParser &Parser = getParser();
3018
3019 OperandMatchResultTy ResTy =
3020 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3021
3022 // Check if the current operand has a custom associated parser, if so, try to
3023 // custom parse the operand, or fallback to the general approach.
3024 if (ResTy == MatchOperand_Success)
3025 return false;
3026 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3027 // there was a match, but an error occurred, in which case, just return that
3028 // the operand parsing failed.
3029 if (ResTy == MatchOperand_ParseFail)
3030 return true;
3031
3032 // Nothing custom, so do general case parsing.
3033 SMLoc S, E;
3034 switch (getLexer().getKind()) {
3035 default: {
3036 SMLoc S = getLoc();
3037 const MCExpr *Expr;
3038 if (parseSymbolicImmVal(Expr))
3039 return Error(S, "invalid operand");
3040
3041 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3042 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3043 return false;
3044 }
3045 case AsmToken::LBrac: {
3046 SMLoc Loc = Parser.getTok().getLoc();
3047 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3048 getContext()));
3049 Parser.Lex(); // Eat '['
3050
3051 // There's no comma after a '[', so we can parse the next operand
3052 // immediately.
3053 return parseOperand(Operands, false, false);
3054 }
3055 case AsmToken::LCurly:
3056 return parseVectorList(Operands);
3057 case AsmToken::Identifier: {
3058 // If we're expecting a Condition Code operand, then just parse that.
3059 if (isCondCode)
3060 return parseCondCode(Operands, invertCondCode);
3061
3062 // If it's a register name, parse it.
3063 if (!parseRegister(Operands))
3064 return false;
3065
3066 // This could be an optional "shift" or "extend" operand.
3067 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3068 // We can only continue if no tokens were eaten.
3069 if (GotShift != MatchOperand_NoMatch)
3070 return GotShift;
3071
3072 // This was not a register so parse other operands that start with an
3073 // identifier (like labels) as expressions and create them as immediates.
3074 const MCExpr *IdVal;
3075 S = getLoc();
3076 if (getParser().parseExpression(IdVal))
3077 return true;
3078 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3079 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3080 return false;
3081 }
3082 case AsmToken::Integer:
3083 case AsmToken::Real:
3084 case AsmToken::Hash: {
3085 // #42 -> immediate.
3086 S = getLoc();
3087
3088 parseOptionalToken(AsmToken::Hash);
3089
3090 // Parse a negative sign
3091 bool isNegative = false;
3092 if (Parser.getTok().is(AsmToken::Minus)) {
3093 isNegative = true;
3094 // We need to consume this token only when we have a Real, otherwise
3095 // we let parseSymbolicImmVal take care of it
3096 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3097 Parser.Lex();
3098 }
3099
3100 // The only Real that should come through here is a literal #0.0 for
3101 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3102 // so convert the value.
3103 const AsmToken &Tok = Parser.getTok();
3104 if (Tok.is(AsmToken::Real)) {
3105 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3106 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3107 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3108 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3109 Mnemonic != "fcmlt")
3110 return TokError("unexpected floating point literal");
3111 else if (IntVal != 0 || isNegative)
3112 return TokError("expected floating-point constant #0.0");
3113 Parser.Lex(); // Eat the token.
3114
3115 Operands.push_back(
3116 AArch64Operand::CreateToken("#0", false, S, getContext()));
3117 Operands.push_back(
3118 AArch64Operand::CreateToken(".0", false, S, getContext()));
3119 return false;
3120 }
3121
3122 const MCExpr *ImmVal;
3123 if (parseSymbolicImmVal(ImmVal))
3124 return true;
3125
3126 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3127 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3128 return false;
3129 }
3130 case AsmToken::Equal: {
3131 SMLoc Loc = getLoc();
3132 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3133 return TokError("unexpected token in operand");
3134 Parser.Lex(); // Eat '='
3135 const MCExpr *SubExprVal;
3136 if (getParser().parseExpression(SubExprVal))
3137 return true;
3138
3139 if (Operands.size() < 2 ||
3140 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3141 return Error(Loc, "Only valid when first operand is register");
3142
3143 bool IsXReg =
3144 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3145 Operands[1]->getReg());
3146
3147 MCContext& Ctx = getContext();
3148 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3149 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3150 if (isa<MCConstantExpr>(SubExprVal)) {
3151 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3152 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3153 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3154 ShiftAmt += 16;
3155 Imm >>= 16;
3156 }
3157 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3158 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3159 Operands.push_back(AArch64Operand::CreateImm(
3160 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3161 if (ShiftAmt)
3162 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3163 ShiftAmt, true, S, E, Ctx));
3164 return false;
3165 }
3166 APInt Simm = APInt(64, Imm << ShiftAmt);
3167 // check if the immediate is an unsigned or signed 32-bit int for W regs
3168 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3169 return Error(Loc, "Immediate too large for register");
3170 }
3171 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3172 const MCExpr *CPLoc =
3173 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3174 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3175 return false;
3176 }
3177 }
3178}
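
The '=' case implements the ldr pseudo-instruction: a constant that fits in a single 16-bit chunk after shifting out trailing zero halfwords is rewritten to movz with an LSL, and anything else is placed in the constant pool. A standalone rerun of that shift loop on a hypothetical value (illustrative only; the trailing-zero test is written with a mask instead of countTrailingZeros, which is equivalent here):

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical "ldr x0, =0x12340000": try to express it as a movz.
  uint64_t Imm = 0x12340000ULL;
  uint32_t ShiftAmt = 0;
  const uint32_t MaxShiftAmt = 48;         // X register; 16 for a W register

  // Mirror of the parser's loop: peel off 16 zero bits at a time.
  while (Imm > 0xFFFF && (Imm & 0xFFFF) == 0) {
    ShiftAmt += 16;
    Imm >>= 16;
  }

  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF)
    std::printf("movz x0, #0x%llx, lsl #%u\n",
                (unsigned long long)Imm, ShiftAmt); // movz x0, #0x1234, lsl #16
  else
    std::puts("too wide for movz; placed in the constant pool");
}
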
3179
3180/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3181/// operands.
3182bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3183 StringRef Name, SMLoc NameLoc,
3184 OperandVector &Operands) {
3185 MCAsmParser &Parser = getParser();
3186 Name = StringSwitch<StringRef>(Name.lower())
3187 .Case("beq", "b.eq")
3188 .Case("bne", "b.ne")
3189 .Case("bhs", "b.hs")
3190 .Case("bcs", "b.cs")
3191 .Case("blo", "b.lo")
3192 .Case("bcc", "b.cc")
3193 .Case("bmi", "b.mi")
3194 .Case("bpl", "b.pl")
3195 .Case("bvs", "b.vs")
3196 .Case("bvc", "b.vc")
3197 .Case("bhi", "b.hi")
3198 .Case("bls", "b.ls")
3199 .Case("bge", "b.ge")
3200 .Case("blt", "b.lt")
3201 .Case("bgt", "b.gt")
3202 .Case("ble", "b.le")
3203 .Case("bal", "b.al")
3204 .Case("bnv", "b.nv")
3205 .Default(Name);
3206
3207 // First check for the AArch64-specific .req directive.
3208 if (Parser.getTok().is(AsmToken::Identifier) &&
3209 Parser.getTok().getIdentifier() == ".req") {
3210 parseDirectiveReq(Name, NameLoc);
3211    // We always return 'error' for this, as we're done with this
3212    // statement and don't need to match the instruction.
3213 return true;
3214 }
3215
3216 // Create the leading tokens for the mnemonic, split by '.' characters.
3217 size_t Start = 0, Next = Name.find('.');
3218 StringRef Head = Name.slice(Start, Next);
3219
3220 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3221 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3222 return parseSysAlias(Head, NameLoc, Operands);
3223
3224 Operands.push_back(
3225 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3226 Mnemonic = Head;
3227
3228 // Handle condition codes for a branch mnemonic
3229 if (Head == "b" && Next != StringRef::npos) {
3230 Start = Next;
3231 Next = Name.find('.', Start + 1);
3232 Head = Name.slice(Start + 1, Next);
3233
3234 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3235 (Head.data() - Name.data()));
3236 AArch64CC::CondCode CC = parseCondCodeString(Head);
3237 if (CC == AArch64CC::Invalid)
3238 return Error(SuffixLoc, "invalid condition code");
3239 Operands.push_back(
3240 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3241 Operands.push_back(
3242 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3243 }
3244
3245 // Add the remaining tokens in the mnemonic.
3246 while (Next != StringRef::npos) {
3247 Start = Next;
3248 Next = Name.find('.', Start + 1);
3249 Head = Name.slice(Start, Next);
3250 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3251 (Head.data() - Name.data()) + 1);
3252 Operands.push_back(
3253 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3254 }
3255
3256 // Conditional compare instructions have a Condition Code operand, which needs
3257 // to be parsed and an immediate operand created.
3258 bool condCodeFourthOperand =
3259 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3260 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3261 Head == "csinc" || Head == "csinv" || Head == "csneg");
3262
3263 // These instructions are aliases to some of the conditional select
3264 // instructions. However, the condition code is inverted in the aliased
3265 // instruction.
3266 //
3267 // FIXME: Is this the correct way to handle these? Or should the parser
3268 // generate the aliased instructions directly?
3269 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3270 bool condCodeThirdOperand =
3271 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3272
3273 // Read the remaining operands.
3274 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3275 // Read the first operand.
3276 if (parseOperand(Operands, false, false)) {
3277 return true;
3278 }
3279
3280 unsigned N = 2;
3281 while (parseOptionalToken(AsmToken::Comma)) {
3282 // Parse and remember the operand.
3283 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3284 (N == 3 && condCodeThirdOperand) ||
3285 (N == 2 && condCodeSecondOperand),
3286 condCodeSecondOperand || condCodeThirdOperand)) {
3287 return true;
3288 }
3289
3290 // After successfully parsing some operands there are two special cases to
3291 // consider (i.e. notional operands not separated by commas). Both are due
3292 // to memory specifiers:
3293 // + An RBrac will end an address for load/store/prefetch
3294 // + An '!' will indicate a pre-indexed operation.
3295 //
3296 // It's someone else's responsibility to make sure these tokens are sane
3297 // in the given context!
3298
3299 SMLoc RLoc = Parser.getTok().getLoc();
3300 if (parseOptionalToken(AsmToken::RBrac))
3301 Operands.push_back(
3302 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3303 SMLoc ELoc = Parser.getTok().getLoc();
3304 if (parseOptionalToken(AsmToken::Exclaim))
3305 Operands.push_back(
3306 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3307
3308 ++N;
3309 }
3310 }
3311
3312 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3313 return true;
3314
3315 return false;
3316}
3317
3318// FIXME: This entire function is a giant hack to provide us with decent
3319// operand range validation/diagnostics until TableGen/MC can be extended
3320// to support autogeneration of this kind of validation.
3321bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3322 SmallVectorImpl<SMLoc> &Loc) {
3323 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3324 // Check for indexed addressing modes w/ the base register being the
3325 // same as a destination/source register or pair load where
3326 // the Rt == Rt2. All of those are undefined behaviour.
3327 switch (Inst.getOpcode()) {
3328 case AArch64::LDPSWpre:
3329 case AArch64::LDPWpost:
3330 case AArch64::LDPWpre:
3331 case AArch64::LDPXpost:
3332 case AArch64::LDPXpre: {
3333 unsigned Rt = Inst.getOperand(1).getReg();
3334 unsigned Rt2 = Inst.getOperand(2).getReg();
3335 unsigned Rn = Inst.getOperand(3).getReg();
3336 if (RI->isSubRegisterEq(Rn, Rt))
3337 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3338 "is also a destination");
3339 if (RI->isSubRegisterEq(Rn, Rt2))
3340 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3341 "is also a destination");
3342     LLVM_FALLTHROUGH;
3343 }
3344 case AArch64::LDPDi:
3345 case AArch64::LDPQi:
3346 case AArch64::LDPSi:
3347 case AArch64::LDPSWi:
3348 case AArch64::LDPWi:
3349 case AArch64::LDPXi: {
3350 unsigned Rt = Inst.getOperand(0).getReg();
3351 unsigned Rt2 = Inst.getOperand(1).getReg();
3352 if (Rt == Rt2)
3353 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3354 break;
3355 }
3356 case AArch64::LDPDpost:
3357 case AArch64::LDPDpre:
3358 case AArch64::LDPQpost:
3359 case AArch64::LDPQpre:
3360 case AArch64::LDPSpost:
3361 case AArch64::LDPSpre:
3362 case AArch64::LDPSWpost: {
3363 unsigned Rt = Inst.getOperand(1).getReg();
3364 unsigned Rt2 = Inst.getOperand(2).getReg();
3365 if (Rt == Rt2)
3366 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3367 break;
3368 }
3369 case AArch64::STPDpost:
3370 case AArch64::STPDpre:
3371 case AArch64::STPQpost:
3372 case AArch64::STPQpre:
3373 case AArch64::STPSpost:
3374 case AArch64::STPSpre:
3375 case AArch64::STPWpost:
3376 case AArch64::STPWpre:
3377 case AArch64::STPXpost:
3378 case AArch64::STPXpre: {
3379 unsigned Rt = Inst.getOperand(1).getReg();
3380 unsigned Rt2 = Inst.getOperand(2).getReg();
3381 unsigned Rn = Inst.getOperand(3).getReg();
3382 if (RI->isSubRegisterEq(Rn, Rt))
3383 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3384 "is also a source");
3385 if (RI->isSubRegisterEq(Rn, Rt2))
3386 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3387 "is also a source");
3388 break;
3389 }
3390 case AArch64::LDRBBpre:
3391 case AArch64::LDRBpre:
3392 case AArch64::LDRHHpre:
3393 case AArch64::LDRHpre:
3394 case AArch64::LDRSBWpre:
3395 case AArch64::LDRSBXpre:
3396 case AArch64::LDRSHWpre:
3397 case AArch64::LDRSHXpre:
3398 case AArch64::LDRSWpre:
3399 case AArch64::LDRWpre:
3400 case AArch64::LDRXpre:
3401 case AArch64::LDRBBpost:
3402 case AArch64::LDRBpost:
3403 case AArch64::LDRHHpost:
3404 case AArch64::LDRHpost:
3405 case AArch64::LDRSBWpost:
3406 case AArch64::LDRSBXpost:
3407 case AArch64::LDRSHWpost:
3408 case AArch64::LDRSHXpost:
3409 case AArch64::LDRSWpost:
3410 case AArch64::LDRWpost:
3411 case AArch64::LDRXpost: {
3412 unsigned Rt = Inst.getOperand(1).getReg();
3413 unsigned Rn = Inst.getOperand(2).getReg();
3414 if (RI->isSubRegisterEq(Rn, Rt))
3415 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3416 "is also a source");
3417 break;
3418 }
3419 case AArch64::STRBBpost:
3420 case AArch64::STRBpost:
3421 case AArch64::STRHHpost:
3422 case AArch64::STRHpost:
3423 case AArch64::STRWpost:
3424 case AArch64::STRXpost:
3425 case AArch64::STRBBpre:
3426 case AArch64::STRBpre:
3427 case AArch64::STRHHpre:
3428 case AArch64::STRHpre:
3429 case AArch64::STRWpre:
3430 case AArch64::STRXpre: {
3431 unsigned Rt = Inst.getOperand(1).getReg();
3432 unsigned Rn = Inst.getOperand(2).getReg();
3433 if (RI->isSubRegisterEq(Rn, Rt))
3434 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3435 "is also a source");
3436 break;
3437 }
3438 }
3439
3440 // Now check immediate ranges. Separate from the above as there is overlap
3441 // in the instructions being checked and this keeps the nested conditionals
3442 // to a minimum.
3443 switch (Inst.getOpcode()) {
3444 case AArch64::ADDSWri:
3445 case AArch64::ADDSXri:
3446 case AArch64::ADDWri:
3447 case AArch64::ADDXri:
3448 case AArch64::SUBSWri:
3449 case AArch64::SUBSXri:
3450 case AArch64::SUBWri:
3451 case AArch64::SUBXri: {
3452 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3453 // some slight duplication here.
3454 if (Inst.getOperand(2).isExpr()) {
3455 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3456 AArch64MCExpr::VariantKind ELFRefKind;
3457 MCSymbolRefExpr::VariantKind DarwinRefKind;
3458 int64_t Addend;
3459 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3460
3461 // Only allow these with ADDXri.
3462 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3463 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3464 Inst.getOpcode() == AArch64::ADDXri)
3465 return false;
3466
3467 // Only allow these with ADDXri/ADDWri
3468 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3469 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3470 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3471 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3472 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3473 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3474 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3475 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
3476 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
3477 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
3478 (Inst.getOpcode() == AArch64::ADDXri ||
3479 Inst.getOpcode() == AArch64::ADDWri))
3480 return false;
3481
3482 // Don't allow symbol refs in the immediate field otherwise
3483 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3484 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3485 // 'cmp w0, 'borked')
3486 return Error(Loc.back(), "invalid immediate expression");
3487 }
3488 // We don't validate more complex expressions here
3489 }
3490 return false;
3491 }
3492 default:
3493 return false;
3494 }
3495}
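
The LDP/STP checks above boil down to two rules: for writeback forms the base register must not overlap either transfer register, and for loads the two transfer registers must differ. A toy standalone version of the load-pair rule (registers reduced to plain integers, so the sub-register aliasing handled by MCRegisterInfo is deliberately ignored):

#include <cstdio>

// Simplified version of the LDP checks above.
static const char *checkLdp(unsigned Rt, unsigned Rt2, unsigned Rn, bool Writeback) {
  if (Writeback && (Rn == Rt || Rn == Rt2))
    return "unpredictable: writeback base is also a destination";
  if (Rt == Rt2)
    return "unpredictable: Rt2 == Rt";
  return "ok";
}

int main() {
  std::printf("%s\n", checkLdp(0, 0, 2, false)); // ldp x0, x0, [x2]
  std::printf("%s\n", checkLdp(0, 1, 0, true));  // ldp x0, x1, [x0], #16
  std::printf("%s\n", checkLdp(0, 1, 2, false)); // ldp x0, x1, [x2]
}
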
3496
3497static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
3498 unsigned VariantID = 0);
3499
3500bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
3501 OperandVector &Operands) {
3502 switch (ErrCode) {
3503 case Match_InvalidTiedOperand:
3504 return Error(Loc, "operand must match destination register");
3505 case Match_MissingFeature:
3506 return Error(Loc,
3507 "instruction requires a CPU feature not currently enabled");
3508 case Match_InvalidOperand:
3509 return Error(Loc, "invalid operand for instruction");
3510 case Match_InvalidSuffix:
3511 return Error(Loc, "invalid type suffix for instruction");
3512 case Match_InvalidCondCode:
3513 return Error(Loc, "expected AArch64 condition code");
3514 case Match_AddSubRegExtendSmall:
3515 return Error(Loc,
3516 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3517 case Match_AddSubRegExtendLarge:
3518 return Error(Loc,
3519 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3520 case Match_AddSubSecondSource:
3521 return Error(Loc,
3522 "expected compatible register, symbol or integer in range [0, 4095]");
3523 case Match_LogicalSecondSource:
3524 return Error(Loc, "expected compatible register or logical immediate");
3525 case Match_InvalidMovImm32Shift:
3526 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3527 case Match_InvalidMovImm64Shift:
3528 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3529 case Match_AddSubRegShift32:
3530 return Error(Loc,
3531 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3532 case Match_AddSubRegShift64:
3533 return Error(Loc,
3534 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3535 case Match_InvalidFPImm:
3536 return Error(Loc,
3537 "expected compatible register or floating-point constant");
3538 case Match_InvalidMemoryIndexedSImm6:
3539 return Error(Loc, "index must be an integer in range [-32, 31].");
3540 case Match_InvalidMemoryIndexedSImm5:
3541 return Error(Loc, "index must be an integer in range [-16, 15].");
3542 case Match_InvalidMemoryIndexedSImm9:
3543 return Error(Loc, "index must be an integer in range [-256, 255].");
3544 case Match_InvalidMemoryIndexedSImm10:
3545 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
3546 case Match_InvalidMemoryIndexed4SImm7:
3547 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3548 case Match_InvalidMemoryIndexed8SImm7:
3549 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3550 case Match_InvalidMemoryIndexed16SImm7:
3551 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3552 case Match_InvalidMemoryWExtend8:
3553 return Error(Loc,
3554 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3555 case Match_InvalidMemoryWExtend16:
3556 return Error(Loc,
3557 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3558 case Match_InvalidMemoryWExtend32:
3559 return Error(Loc,
3560 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3561 case Match_InvalidMemoryWExtend64:
3562 return Error(Loc,
3563 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3564 case Match_InvalidMemoryWExtend128:
3565 return Error(Loc,
3566 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3567 case Match_InvalidMemoryXExtend8:
3568 return Error(Loc,
3569 "expected 'lsl' or 'sxtx' with optional shift of #0");
3570 case Match_InvalidMemoryXExtend16:
3571 return Error(Loc,
3572 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3573 case Match_InvalidMemoryXExtend32:
3574 return Error(Loc,
3575 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3576 case Match_InvalidMemoryXExtend64:
3577 return Error(Loc,
3578 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3579 case Match_InvalidMemoryXExtend128:
3580 return Error(Loc,
3581 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3582 case Match_InvalidMemoryIndexed1:
3583 return Error(Loc, "index must be an integer in range [0, 4095].");
3584 case Match_InvalidMemoryIndexed2:
3585 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3586 case Match_InvalidMemoryIndexed4:
3587 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3588 case Match_InvalidMemoryIndexed8:
3589 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3590 case Match_InvalidMemoryIndexed16:
3591 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3592 case Match_InvalidImm0_1:
3593 return Error(Loc, "immediate must be an integer in range [0, 1].");
3594 case Match_InvalidImm0_7:
3595 return Error(Loc, "immediate must be an integer in range [0, 7].");
3596 case Match_InvalidImm0_15:
3597 return Error(Loc, "immediate must be an integer in range [0, 15].");
3598 case Match_InvalidImm0_31:
3599 return Error(Loc, "immediate must be an integer in range [0, 31].");
3600 case Match_InvalidImm0_63:
3601 return Error(Loc, "immediate must be an integer in range [0, 63].");
3602 case Match_InvalidImm0_127:
3603 return Error(Loc, "immediate must be an integer in range [0, 127].");
3604 case Match_InvalidImm0_255:
3605 return Error(Loc, "immediate must be an integer in range [0, 255].");
3606 case Match_InvalidImm0_65535:
3607 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3608 case Match_InvalidImm1_8:
3609 return Error(Loc, "immediate must be an integer in range [1, 8].");
3610 case Match_InvalidImm1_16:
3611 return Error(Loc, "immediate must be an integer in range [1, 16].");
3612 case Match_InvalidImm1_32:
3613 return Error(Loc, "immediate must be an integer in range [1, 32].");
3614 case Match_InvalidImm1_64:
3615 return Error(Loc, "immediate must be an integer in range [1, 64].");
3616 case Match_InvalidIndex1:
3617 return Error(Loc, "expected lane specifier '[1]'");
3618 case Match_InvalidIndexB:
3619 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3620 case Match_InvalidIndexH:
3621 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3622 case Match_InvalidIndexS:
3623 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3624 case Match_InvalidIndexD:
3625 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3626 case Match_InvalidLabel:
3627 return Error(Loc, "expected label or encodable integer pc offset");
3628 case Match_MRS:
3629 return Error(Loc, "expected readable system register");
3630 case Match_MSR:
3631 return Error(Loc, "expected writable system register or pstate");
3632 case Match_InvalidComplexRotationEven:
3633 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
3634 case Match_InvalidComplexRotationOdd:
3635 return Error(Loc, "complex rotation must be 90 or 270.");
3636 case Match_MnemonicFail: {
3637 std::string Suggestion = AArch64MnemonicSpellCheck(
3638 ((AArch64Operand &)*Operands[0]).getToken(),
3639 ComputeAvailableFeatures(STI->getFeatureBits()));
3640 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
3641 }
3642 case Match_InvalidSVEPattern:
3643 return Error(Loc, "invalid predicate pattern");
3644 case Match_InvalidSVEPredicateAnyReg:
3645 case Match_InvalidSVEPredicateBReg:
3646 case Match_InvalidSVEPredicateHReg:
3647 case Match_InvalidSVEPredicateSReg:
3648 case Match_InvalidSVEPredicateDReg:
3649 return Error(Loc, "invalid predicate register.");
3650 case Match_InvalidSVEPredicate3bAnyReg:
3651 case Match_InvalidSVEPredicate3bBReg:
3652 case Match_InvalidSVEPredicate3bHReg:
3653 case Match_InvalidSVEPredicate3bSReg:
3654 case Match_InvalidSVEPredicate3bDReg:
3655 return Error(Loc, "restricted predicate has range [0, 7].");
3656 default:
3657     llvm_unreachable("unexpected error code!");
3658 }
3659}
3660
3661static const char *getSubtargetFeatureName(uint64_t Val);
3662
3663bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3664 OperandVector &Operands,
3665 MCStreamer &Out,
3666 uint64_t &ErrorInfo,
3667 bool MatchingInlineAsm) {
3668   assert(!Operands.empty() && "Unexpect empty operand list!");
3669 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3670   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3671
3672 StringRef Tok = Op.getToken();
3673 unsigned NumOperands = Operands.size();
3674
3675 if (NumOperands == 4 && Tok == "lsl") {
3676 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3677 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3678 if (Op2.isScalarReg() && Op3.isImm()) {
3679 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3680 if (Op3CE) {
3681 uint64_t Op3Val = Op3CE->getValue();
3682 uint64_t NewOp3Val = 0;
3683 uint64_t NewOp4Val = 0;
3684 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3685 Op2.getReg())) {
3686 NewOp3Val = (32 - Op3Val) & 0x1f;
3687 NewOp4Val = 31 - Op3Val;
3688 } else {
3689 NewOp3Val = (64 - Op3Val) & 0x3f;
3690 NewOp4Val = 63 - Op3Val;
3691 }
3692
3693 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3694 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3695
3696 Operands[0] = AArch64Operand::CreateToken(
3697 "ubfm", false, Op.getStartLoc(), getContext());
3698 Operands.push_back(AArch64Operand::CreateImm(
3699 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3700 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3701 Op3.getEndLoc(), getContext());
3702 }
3703 }
3704 } else if (NumOperands == 4 && Tok == "bfc") {
3705 // FIXME: Horrible hack to handle BFC->BFM alias.
3706 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3707 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3708 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3709
3710 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
3711 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3712 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3713
3714 if (LSBCE && WidthCE) {
3715 uint64_t LSB = LSBCE->getValue();
3716 uint64_t Width = WidthCE->getValue();
3717
3718 uint64_t RegWidth = 0;
3719 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3720 Op1.getReg()))
3721 RegWidth = 64;
3722 else
3723 RegWidth = 32;
3724
3725 if (LSB >= RegWidth)
3726 return Error(LSBOp.getStartLoc(),
3727 "expected integer in range [0, 31]");
3728 if (Width < 1 || Width > RegWidth)
3729 return Error(WidthOp.getStartLoc(),
3730 "expected integer in range [1, 32]");
3731
3732 uint64_t ImmR = 0;
3733 if (RegWidth == 32)
3734 ImmR = (32 - LSB) & 0x1f;
3735 else
3736 ImmR = (64 - LSB) & 0x3f;
3737
3738 uint64_t ImmS = Width - 1;
3739
3740 if (ImmR != 0 && ImmS >= ImmR)
3741 return Error(WidthOp.getStartLoc(),
3742 "requested insert overflows register");
3743
3744 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3745 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3746 Operands[0] = AArch64Operand::CreateToken(
3747 "bfm", false, Op.getStartLoc(), getContext());
3748 Operands[2] = AArch64Operand::CreateReg(
3749 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
3750 SMLoc(), SMLoc(), getContext());
3751 Operands[3] = AArch64Operand::CreateImm(
3752 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3753 Operands.emplace_back(
3754 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3755 WidthOp.getEndLoc(), getContext()));
3756 }
3757 }
3758 } else if (NumOperands == 5) {
3759 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3760 // UBFIZ -> UBFM aliases.
3761 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3762 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3763 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3764 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3765
3766 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
3767 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3768 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3769
3770 if (Op3CE && Op4CE) {
3771 uint64_t Op3Val = Op3CE->getValue();
3772 uint64_t Op4Val = Op4CE->getValue();
3773
3774 uint64_t RegWidth = 0;
3775 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3776 Op1.getReg()))
3777 RegWidth = 64;
3778 else
3779 RegWidth = 32;
3780
3781 if (Op3Val >= RegWidth)
3782 return Error(Op3.getStartLoc(),
3783 "expected integer in range [0, 31]");
3784 if (Op4Val < 1 || Op4Val > RegWidth)
3785 return Error(Op4.getStartLoc(),
3786 "expected integer in range [1, 32]");
3787
3788 uint64_t NewOp3Val = 0;
3789 if (RegWidth == 32)
3790 NewOp3Val = (32 - Op3Val) & 0x1f;
3791 else
3792 NewOp3Val = (64 - Op3Val) & 0x3f;
3793
3794 uint64_t NewOp4Val = Op4Val - 1;
3795
3796 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3797 return Error(Op4.getStartLoc(),
3798 "requested insert overflows register");
3799
3800 const MCExpr *NewOp3 =
3801 MCConstantExpr::create(NewOp3Val, getContext());
3802 const MCExpr *NewOp4 =
3803 MCConstantExpr::create(NewOp4Val, getContext());
3804 Operands[3] = AArch64Operand::CreateImm(
3805 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3806 Operands[4] = AArch64Operand::CreateImm(
3807 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3808 if (Tok == "bfi")
3809 Operands[0] = AArch64Operand::CreateToken(
3810 "bfm", false, Op.getStartLoc(), getContext());
3811 else if (Tok == "sbfiz")
3812 Operands[0] = AArch64Operand::CreateToken(
3813 "sbfm", false, Op.getStartLoc(), getContext());
3814 else if (Tok == "ubfiz")
3815 Operands[0] = AArch64Operand::CreateToken(
3816 "ubfm", false, Op.getStartLoc(), getContext());
3817 else
3818           llvm_unreachable("No valid mnemonic for alias?");
3819 }
3820 }
3821
3822 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3823 // UBFX -> UBFM aliases.
3824 } else if (NumOperands == 5 &&
3825 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3826 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3827 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3828 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3829
3830 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
3831 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3832 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3833
3834 if (Op3CE && Op4CE) {
3835 uint64_t Op3Val = Op3CE->getValue();
3836 uint64_t Op4Val = Op4CE->getValue();
3837
3838 uint64_t RegWidth = 0;
3839 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3840 Op1.getReg()))
3841 RegWidth = 64;
3842 else
3843 RegWidth = 32;
3844
3845 if (Op3Val >= RegWidth)
3846 return Error(Op3.getStartLoc(),
3847 "expected integer in range [0, 31]");
3848 if (Op4Val < 1 || Op4Val > RegWidth)
3849 return Error(Op4.getStartLoc(),
3850 "expected integer in range [1, 32]");
3851
3852 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3853
3854 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3855 return Error(Op4.getStartLoc(),
3856 "requested extract overflows register");
3857
3858 const MCExpr *NewOp4 =
3859 MCConstantExpr::create(NewOp4Val, getContext());
3860 Operands[4] = AArch64Operand::CreateImm(
3861 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3862 if (Tok == "bfxil")
3863 Operands[0] = AArch64Operand::CreateToken(
3864 "bfm", false, Op.getStartLoc(), getContext());
3865 else if (Tok == "sbfx")
3866 Operands[0] = AArch64Operand::CreateToken(
3867 "sbfm", false, Op.getStartLoc(), getContext());
3868 else if (Tok == "ubfx")
3869 Operands[0] = AArch64Operand::CreateToken(
3870 "ubfm", false, Op.getStartLoc(), getContext());
3871 else
3872           llvm_unreachable("No valid mnemonic for alias?");
3873 }
3874 }
3875 }
3876 }
3877
3878 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
3879 // instruction for FP registers correctly in some rare circumstances. Convert
3880 // it to a safe instruction and warn (because silently changing someone's
3881 // assembly is rude).
3882 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
3883 NumOperands == 4 && Tok == "movi") {
3884 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3885 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3886 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3887 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
3888 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
3889 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
3890 if (Suffix.lower() == ".2d" &&
3891 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
3892 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
3893 " correctly on this CPU, converting to equivalent movi.16b");
3894 // Switch the suffix to .16b.
3895 unsigned Idx = Op1.isToken() ? 1 : 2;
3896 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
3897 getContext());
3898 }
3899 }
3900 }
3901
3902 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3903 // InstAlias can't quite handle this since the reg classes aren't
3904 // subclasses.
3905 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3906 // The source register can be Wn here, but the matcher expects a
3907 // GPR64. Twiddle it here if necessary.
3908 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3909 if (Op.isScalarReg()) {
3910 unsigned Reg = getXRegFromWReg(Op.getReg());
3911 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3912 Op.getStartLoc(), Op.getEndLoc(),
3913 getContext());
3914 }
3915 }
3916 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3917 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3918 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3919 if (Op.isScalarReg() &&
3920 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3921 Op.getReg())) {
3922 // The source register can be Wn here, but the matcher expects a
3923 // GPR64. Twiddle it here if necessary.
3924 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3925 if (Op.isScalarReg()) {
3926 unsigned Reg = getXRegFromWReg(Op.getReg());
3927 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3928 Op.getStartLoc(),
3929 Op.getEndLoc(), getContext());
3930 }
3931 }
3932 }
3933 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3934 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3935 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3936 if (Op.isScalarReg() &&
3937 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3938 Op.getReg())) {
3939 // The source register can be Wn here, but the matcher expects a
3940 // GPR32. Twiddle it here if necessary.
3941 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3942 if (Op.isScalarReg()) {
3943 unsigned Reg = getWRegFromXReg(Op.getReg());
3944 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3945 Op.getStartLoc(),
3946 Op.getEndLoc(), getContext());
3947 }
3948 }
3949 }
3950
3951 MCInst Inst;
3952 // First try to match against the secondary set of tables containing the
3953 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3954 unsigned MatchResult =
3955 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3956
3957 // If that fails, try against the alternate table containing long-form NEON:
3958 // "fadd v0.2s, v1.2s, v2.2s"
3959 if (MatchResult != Match_Success) {
3960 // But first, save the short-form match result: we can use it in case the
3961 // long-form match also fails.
3962 auto ShortFormNEONErrorInfo = ErrorInfo;
3963 auto ShortFormNEONMatchResult = MatchResult;
3964
3965 MatchResult =
3966 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3967
3968 // Now, both matches failed, and the long-form match failed on the mnemonic
3969 // suffix token operand. The short-form match failure is probably more
3970 // relevant: use it instead.
3971 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3972 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3973 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3974 MatchResult = ShortFormNEONMatchResult;
3975 ErrorInfo = ShortFormNEONErrorInfo;
3976 }
3977 }
3978
3979 switch (MatchResult) {
3980 case Match_Success: {
3981 // Perform range checking and other semantic validations
3982 SmallVector<SMLoc, 8> OperandLocs;
3983 NumOperands = Operands.size();
3984 for (unsigned i = 1; i < NumOperands; ++i)
3985 OperandLocs.push_back(Operands[i]->getStartLoc());
3986 if (validateInstruction(Inst, OperandLocs))
3987 return true;
3988
3989 Inst.setLoc(IDLoc);
3990 Out.EmitInstruction(Inst, getSTI());
3991 return false;
3992 }
3993 case Match_MissingFeature: {
3994    assert(ErrorInfo && "Unknown missing feature!");
3995 // Special case the error message for the very common case where only
3996 // a single subtarget feature is missing (neon, e.g.).
3997 std::string Msg = "instruction requires:";
3998 uint64_t Mask = 1;
3999 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4000 if (ErrorInfo & Mask) {
4001 Msg += " ";
4002 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4003 }
4004 Mask <<= 1;
4005 }
4006 return Error(IDLoc, Msg);
4007 }
4008 case Match_MnemonicFail:
4009 return showMatchError(IDLoc, MatchResult, Operands);
4010 case Match_InvalidOperand: {
4011 SMLoc ErrorLoc = IDLoc;
4012
4013 if (ErrorInfo != ~0ULL) {
4014 if (ErrorInfo >= Operands.size())
4015 return Error(IDLoc, "too few operands for instruction",
4016 SMRange(IDLoc, getTok().getLoc()));
4017
4018 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4019 if (ErrorLoc == SMLoc())
4020 ErrorLoc = IDLoc;
4021 }
4022 // If the match failed on a suffix token operand, tweak the diagnostic
4023 // accordingly.
4024 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4025 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4026 MatchResult = Match_InvalidSuffix;
4027
4028 return showMatchError(ErrorLoc, MatchResult, Operands);
4029 }
4030 case Match_InvalidTiedOperand:
4031 case Match_InvalidMemoryIndexed1:
4032 case Match_InvalidMemoryIndexed2:
4033 case Match_InvalidMemoryIndexed4:
4034 case Match_InvalidMemoryIndexed8:
4035 case Match_InvalidMemoryIndexed16:
4036 case Match_InvalidCondCode:
4037 case Match_AddSubRegExtendSmall:
4038 case Match_AddSubRegExtendLarge:
4039 case Match_AddSubSecondSource:
4040 case Match_LogicalSecondSource:
4041 case Match_AddSubRegShift32:
4042 case Match_AddSubRegShift64:
4043 case Match_InvalidMovImm32Shift:
4044 case Match_InvalidMovImm64Shift:
4045 case Match_InvalidFPImm:
4046 case Match_InvalidMemoryWExtend8:
4047 case Match_InvalidMemoryWExtend16:
4048 case Match_InvalidMemoryWExtend32:
4049 case Match_InvalidMemoryWExtend64:
4050 case Match_InvalidMemoryWExtend128:
4051 case Match_InvalidMemoryXExtend8:
4052 case Match_InvalidMemoryXExtend16:
4053 case Match_InvalidMemoryXExtend32:
4054 case Match_InvalidMemoryXExtend64:
4055 case Match_InvalidMemoryXExtend128:
4056 case Match_InvalidMemoryIndexedSImm6:
4057 case Match_InvalidMemoryIndexed4SImm7:
4058 case Match_InvalidMemoryIndexed8SImm7:
4059 case Match_InvalidMemoryIndexed16SImm7:
4060 case Match_InvalidMemoryIndexedSImm5:
4061 case Match_InvalidMemoryIndexedSImm9:
4062 case Match_InvalidMemoryIndexedSImm10:
4063 case Match_InvalidImm0_1:
4064 case Match_InvalidImm0_7:
4065 case Match_InvalidImm0_15:
4066 case Match_InvalidImm0_31:
4067 case Match_InvalidImm0_63:
4068 case Match_InvalidImm0_127:
4069 case Match_InvalidImm0_255:
4070 case Match_InvalidImm0_65535:
4071 case Match_InvalidImm1_8:
4072 case Match_InvalidImm1_16:
4073 case Match_InvalidImm1_32:
4074 case Match_InvalidImm1_64:
4075 case Match_InvalidIndex1:
4076 case Match_InvalidIndexB:
4077 case Match_InvalidIndexH:
4078 case Match_InvalidIndexS:
4079 case Match_InvalidIndexD:
4080 case Match_InvalidLabel:
4081 case Match_InvalidComplexRotationEven:
4082 case Match_InvalidComplexRotationOdd:
4083 case Match_InvalidSVEPredicateAnyReg:
4084 case Match_InvalidSVEPattern:
4085 case Match_InvalidSVEPredicateBReg:
4086 case Match_InvalidSVEPredicateHReg:
4087 case Match_InvalidSVEPredicateSReg:
4088 case Match_InvalidSVEPredicateDReg:
4089 case Match_InvalidSVEPredicate3bAnyReg:
4090 case Match_InvalidSVEPredicate3bBReg:
4091 case Match_InvalidSVEPredicate3bHReg:
4092 case Match_InvalidSVEPredicate3bSReg:
4093 case Match_InvalidSVEPredicate3bDReg:
4094 case Match_MSR:
4095 case Match_MRS: {
4096 if (ErrorInfo >= Operands.size())
4097 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4098 // Any time we get here, there's nothing fancy to do. Just get the
4099 // operand SMLoc and display the diagnostic.
4100 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4101 if (ErrorLoc == SMLoc())
4102 ErrorLoc = IDLoc;
4103 return showMatchError(ErrorLoc, MatchResult, Operands);
4104 }
4105 }
4106
4107  llvm_unreachable("Implement any new match types added!");
4108}
4109
4110/// ParseDirective parses the AArch64-specific directives
4111bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4112 const MCObjectFileInfo::Environment Format =
4113 getContext().getObjectFileInfo()->getObjectFileType();
4114 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4115 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4116
4117 StringRef IDVal = DirectiveID.getIdentifier();
4118 SMLoc Loc = DirectiveID.getLoc();
4119 if (IDVal == ".arch")
4120 parseDirectiveArch(Loc);
4121 else if (IDVal == ".cpu")
4122 parseDirectiveCPU(Loc);
4123 else if (IDVal == ".hword")
4124 parseDirectiveWord(2, Loc);
4125 else if (IDVal == ".word")
4126 parseDirectiveWord(4, Loc);
4127 else if (IDVal == ".xword")
4128 parseDirectiveWord(8, Loc);
4129 else if (IDVal == ".tlsdesccall")
4130 parseDirectiveTLSDescCall(Loc);
4131 else if (IDVal == ".ltorg" || IDVal == ".pool")
4132 parseDirectiveLtorg(Loc);
4133 else if (IDVal == ".unreq")
4134 parseDirectiveUnreq(Loc);
4135 else if (!IsMachO && !IsCOFF) {
4136 if (IDVal == ".inst")
4137 parseDirectiveInst(Loc);
4138 else
4139 return true;
4140 } else if (IDVal == MCLOHDirectiveName())
4141 parseDirectiveLOH(IDVal, Loc);
4142 else
4143 return true;
4144 return false;
4145}
4146
4147static const struct {
4148 const char *Name;
4149 const FeatureBitset Features;
4150} ExtensionMap[] = {
4151 { "crc", {AArch64::FeatureCRC} },
4152 { "crypto", {AArch64::FeatureCrypto} },
4153 { "fp", {AArch64::FeatureFPARMv8} },
4154 { "simd", {AArch64::FeatureNEON} },
4155 { "ras", {AArch64::FeatureRAS} },
4156 { "lse", {AArch64::FeatureLSE} },
4157
4158 // FIXME: Unsupported extensions
4159 { "pan", {} },
4160 { "lor", {} },
4161 { "rdma", {} },
4162 { "profile", {} },
4163};
4164
4165/// parseDirectiveArch
4166/// ::= .arch token
4167bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4168 SMLoc ArchLoc = getLoc();
4169
4170 StringRef Arch, ExtensionString;
4171 std::tie(Arch, ExtensionString) =
4172 getParser().parseStringToEndOfStatement().trim().split('+');
4173
4174 AArch64::ArchKind ID = AArch64::parseArch(Arch);
4175 if (ID == AArch64::ArchKind::INVALID)
4176 return Error(ArchLoc, "unknown arch name");
4177
4178 if (parseToken(AsmToken::EndOfStatement))
4179 return true;
4180
4181 // Get the architecture and extension features.
4182 std::vector<StringRef> AArch64Features;
4183 AArch64::getArchFeatures(ID, AArch64Features);
4184 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
4185 AArch64Features);
4186
4187 MCSubtargetInfo &STI = copySTI();
4188 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4189 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4190
4191 SmallVector<StringRef, 4> RequestedExtensions;
4192 if (!ExtensionString.empty())
4193 ExtensionString.split(RequestedExtensions, '+');
4194
4195 FeatureBitset Features = STI.getFeatureBits();
4196 for (auto Name : RequestedExtensions) {
4197 bool EnableFeature = true;
4198
4199 if (Name.startswith_lower("no")) {
4200 EnableFeature = false;
4201 Name = Name.substr(2);
4202 }
4203
4204 for (const auto &Extension : ExtensionMap) {
4205 if (Extension.Name != Name)
4206 continue;
4207
4208 if (Extension.Features.none())
4209 report_fatal_error("unsupported architectural extension: " + Name);
4210
4211 FeatureBitset ToggleFeatures = EnableFeature
4212 ? (~Features & Extension.Features)
4213 : ( Features & Extension.Features);
4214 uint64_t Features =
4215 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4216 setAvailableFeatures(Features);
4217 break;
4218 }
4219 }
4220 return false;
4221}
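Both .arch above and .cpu below toggle extension features the same way: when enabling an extension, only the extension bits that are currently clear are handed to ToggleFeature; when disabling, only the bits that are currently set, so exactly the bits that must change state get flipped. A minimal standalone sketch of that selection, simplified to a plain 64-bit mask instead of FeatureBitset (names are illustrative):

#include <cassert>
#include <cstdint>

// Return only the bits of Extension that actually need to change state in
// Current: bits still clear when enabling, bits still set when disabling.
static uint64_t bitsToToggle(uint64_t Current, uint64_t Extension,
                             bool Enable) {
  return Enable ? (~Current & Extension)   // bits to turn on
                : ( Current & Extension);  // bits to turn off
}

int main() {
  uint64_t Current = 0xA;  // features already enabled (bits 1 and 3)
  uint64_t Ext     = 0x6;  // bits belonging to one extension (bits 1 and 2)
  assert(bitsToToggle(Current, Ext, /*Enable=*/true)  == 0x4);
  assert(bitsToToggle(Current, Ext, /*Enable=*/false) == 0x2);
  return 0;
}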
4222
4223static SMLoc incrementLoc(SMLoc L, int Offset) {
4224 return SMLoc::getFromPointer(L.getPointer() + Offset);
4225}
4226
4227/// parseDirectiveCPU
4228/// ::= .cpu id
4229bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4230 SMLoc CurLoc = getLoc();
4231
4232 StringRef CPU, ExtensionString;
4233 std::tie(CPU, ExtensionString) =
4234 getParser().parseStringToEndOfStatement().trim().split('+');
4235
4236 if (parseToken(AsmToken::EndOfStatement))
4237 return true;
4238
4239 SmallVector<StringRef, 4> RequestedExtensions;
4240 if (!ExtensionString.empty())
4241 ExtensionString.split(RequestedExtensions, '+');
4242
4243 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4244 // once that is tablegen'ed
4245 if (!getSTI().isCPUStringValid(CPU)) {
4246 Error(CurLoc, "unknown CPU name");
4247 return false;
4248 }
4249
4250 MCSubtargetInfo &STI = copySTI();
4251 STI.setDefaultFeatures(CPU, "");
4252 CurLoc = incrementLoc(CurLoc, CPU.size());
4253
4254 FeatureBitset Features = STI.getFeatureBits();
4255 for (auto Name : RequestedExtensions) {
4256 // Advance source location past '+'.
4257 CurLoc = incrementLoc(CurLoc, 1);
4258
4259 bool EnableFeature = true;
4260
4261 if (Name.startswith_lower("no")) {
4262 EnableFeature = false;
4263 Name = Name.substr(2);
4264 }
4265
4266 bool FoundExtension = false;
4267 for (const auto &Extension : ExtensionMap) {
4268 if (Extension.Name != Name)
4269 continue;
4270
4271 if (Extension.Features.none())
4272 report_fatal_error("unsupported architectural extension: " + Name);
4273
4274 FeatureBitset ToggleFeatures = EnableFeature
4275 ? (~Features & Extension.Features)
4276 : ( Features & Extension.Features);
4277 uint64_t Features =
4278 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4279 setAvailableFeatures(Features);
4280 FoundExtension = true;
4281
4282 break;
4283 }
4284
4285 if (!FoundExtension)
4286 Error(CurLoc, "unsupported architectural extension");
4287
4288 CurLoc = incrementLoc(CurLoc, Name.size());
4289 }
4290 return false;
4291}
4292
4293/// parseDirectiveWord
4294/// ::= .word [ expression (, expression)* ]
4295bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4296 auto parseOp = [&]() -> bool {
4297 const MCExpr *Value;
4298 if (getParser().parseExpression(Value))
4299 return true;
4300 getParser().getStreamer().EmitValue(Value, Size, L);
4301 return false;
4302 };
4303
4304 if (parseMany(parseOp))
4305 return true;
4306 return false;
4307}
4308
4309/// parseDirectiveInst
4310/// ::= .inst opcode [, ...]
4311bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4312 if (getLexer().is(AsmToken::EndOfStatement))
4313 return Error(Loc, "expected expression following '.inst' directive");
4314
4315 auto parseOp = [&]() -> bool {
4316 SMLoc L = getLoc();
4317 const MCExpr *Expr;
4318 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4319 return true;
4320 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4321 if (check(!Value, L, "expected constant expression"))
4322 return true;
4323 getTargetStreamer().emitInst(Value->getValue());
4324 return false;
4325 };
4326
4327 if (parseMany(parseOp))
4328 return addErrorSuffix(" in '.inst' directive");
4329 return false;
4330}
4331
4332// parseDirectiveTLSDescCall:
4333// ::= .tlsdesccall symbol
4334bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4335 StringRef Name;
4336 if (check(getParser().parseIdentifier(Name), L,
4337 "expected symbol after directive") ||
4338 parseToken(AsmToken::EndOfStatement))
4339 return true;
4340
4341 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4342 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4343 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4344
4345 MCInst Inst;
4346 Inst.setOpcode(AArch64::TLSDESCCALL);
4347 Inst.addOperand(MCOperand::createExpr(Expr));
4348
4349 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4350 return false;
4351}
4352
4353/// ::= .loh <lohName | lohId> label1, ..., labelN
4354/// The number of arguments depends on the loh identifier.
4355bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4356 MCLOHType Kind;
4357 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4358 if (getParser().getTok().isNot(AsmToken::Integer))
4359 return TokError("expected an identifier or a number in directive");
4360 // We successfully get a numeric value for the identifier.
4361 // Check if it is valid.
4362 int64_t Id = getParser().getTok().getIntVal();
4363 if (Id <= -1U && !isValidMCLOHType(Id))
4364 return TokError("invalid numeric identifier in directive");
4365 Kind = (MCLOHType)Id;
4366 } else {
4367 StringRef Name = getTok().getIdentifier();
4368 // We successfully parse an identifier.
4369 // Check if it is a recognized one.
4370 int Id = MCLOHNameToId(Name);
4371
4372 if (Id == -1)
4373 return TokError("invalid identifier in directive");
4374 Kind = (MCLOHType)Id;
4375 }
4376 // Consume the identifier.
4377 Lex();
4378 // Get the number of arguments of this LOH.
4379 int NbArgs = MCLOHIdToNbArgs(Kind);
4380
4381  assert(NbArgs != -1 && "Invalid number of arguments");
4382
4383 SmallVector<MCSymbol *, 3> Args;
4384 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4385 StringRef Name;
4386 if (getParser().parseIdentifier(Name))
4387 return TokError("expected identifier in directive");
4388 Args.push_back(getContext().getOrCreateSymbol(Name));
4389
4390 if (Idx + 1 == NbArgs)
4391 break;
4392 if (parseToken(AsmToken::Comma,
4393 "unexpected token in '" + Twine(IDVal) + "' directive"))
4394 return true;
4395 }
4396 if (parseToken(AsmToken::EndOfStatement,
4397 "unexpected token in '" + Twine(IDVal) + "' directive"))
4398 return true;
4399
4400 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4401 return false;
4402}
4403
4404/// parseDirectiveLtorg
4405/// ::= .ltorg | .pool
4406bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4407 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4408 return true;
4409 getTargetStreamer().emitCurrentConstantPool();
4410 return false;
4411}
4412
4413/// parseDirectiveReq
4414/// ::= name .req registername
4415bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4416 MCAsmParser &Parser = getParser();
4417 Parser.Lex(); // Eat the '.req' token.
4418 SMLoc SRegLoc = getLoc();
4419 int RegNum = tryParseRegister();
4420 RegKind RegisterKind = RegKind::Scalar;
4421
4422 if (RegNum == -1) {
4423 StringRef Kind;
4424 RegisterKind = RegKind::NeonVector;
4425 RegNum = tryMatchVectorRegister(Kind, false);
4426 if (!Kind.empty())
4427 return Error(SRegLoc, "vector register without type specifier expected");
4428 }
4429
4430 if (RegNum == -1) {
4431 StringRef Kind;
4432 RegisterKind = RegKind::SVEDataVector;
4433 OperandMatchResultTy Res =
4434 tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
4435
4436 if (Res == MatchOperand_ParseFail)
4437 return true;
4438
4439 if (Res == MatchOperand_Success && !Kind.empty())
4440 return Error(SRegLoc,
4441 "sve vector register without type specifier expected");
4442 }
4443
4444 if (RegNum == -1) {
4445 StringRef Kind;
4446 RegisterKind = RegKind::SVEPredicateVector;
4447 OperandMatchResultTy Res =
4448 tryParseSVERegister(RegNum, Kind, RegKind::SVEPredicateVector);
4449
4450 if (Res == MatchOperand_ParseFail)
4451 return true;
4452
4453 if (Res == MatchOperand_Success && !Kind.empty())
4454 return Error(SRegLoc,
4455 "sve predicate register without type specifier expected");
4456 }
4457
4458 if (RegNum == -1)
4459 return Error(SRegLoc, "register name or alias expected");
4460
4461 // Shouldn't be anything else.
4462 if (parseToken(AsmToken::EndOfStatement,
4463 "unexpected input in .req directive"))
4464 return true;
4465
4466 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
4467 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4468 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4469
4470 return false;
4471}
4472
4473/// parseDirectiveUnreq
4474/// ::= .unreq registername
4475bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4476 MCAsmParser &Parser = getParser();
4477 if (getTok().isNot(AsmToken::Identifier))
4478 return TokError("unexpected input in .unreq directive.");
4479 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4480 Parser.Lex(); // Eat the identifier.
4481 if (parseToken(AsmToken::EndOfStatement))
4482    return addErrorSuffix(" in '.unreq' directive");
4483 return false;
4484}
4485
4486bool
4487AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4488 AArch64MCExpr::VariantKind &ELFRefKind,
4489 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4490 int64_t &Addend) {
4491 ELFRefKind = AArch64MCExpr::VK_INVALID;
4492 DarwinRefKind = MCSymbolRefExpr::VK_None;
4493 Addend = 0;
4494
4495 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4496 ELFRefKind = AE->getKind();
4497 Expr = AE->getSubExpr();
4498 }
4499
4500 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4501 if (SE) {
4502 // It's a simple symbol reference with no addend.
4503 DarwinRefKind = SE->getKind();
4504 return true;
4505 }
4506
4507 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4508 if (!BE)
4509 return false;
4510
4511 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4512 if (!SE)
4513 return false;
4514 DarwinRefKind = SE->getKind();
4515
4516 if (BE->getOpcode() != MCBinaryExpr::Add &&
4517 BE->getOpcode() != MCBinaryExpr::Sub)
4518 return false;
4519
4520 // See if the addend is a constant, otherwise there's more going
4521 // on here than we can deal with.
4522 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4523 if (!AddendExpr)
4524 return false;
4525
4526 Addend = AddendExpr->getValue();
4527 if (BE->getOpcode() == MCBinaryExpr::Sub)
4528 Addend = -Addend;
4529
4530 // It's some symbol reference + a constant addend, but really
4531 // shouldn't use both Darwin and ELF syntax.
4532 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4533 DarwinRefKind == MCSymbolRefExpr::VK_None;
4534}
4535
4536/// Force static initialization.
4537extern "C" void LLVMInitializeAArch64AsmParser() {
4538 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
4539 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
4540 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
4541}
4542
4543#define GET_REGISTER_MATCHER
4544#define GET_SUBTARGET_FEATURE_NAME
4545#define GET_MATCHER_IMPLEMENTATION
4546#define GET_MNEMONIC_SPELL_CHECKER
4547#include "AArch64GenAsmMatcher.inc"
4548
4549// Define this matcher function after the auto-generated include so we
4550// have the match class enum definitions.
4551unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4552 unsigned Kind) {
4553 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4554 // If the kind is a token for a literal immediate, check if our asm
4555 // operand matches. This is for InstAliases which have a fixed-value
4556 // immediate in the syntax.
4557 int64_t ExpectedVal;
4558 switch (Kind) {
4559 default:
4560 return Match_InvalidOperand;
4561 case MCK__35_0:
4562 ExpectedVal = 0;
4563 break;
4564 case MCK__35_1:
4565 ExpectedVal = 1;
4566 break;
4567 case MCK__35_12:
4568 ExpectedVal = 12;
4569 break;
4570 case MCK__35_16:
4571 ExpectedVal = 16;
4572 break;
4573 case MCK__35_2:
4574 ExpectedVal = 2;
4575 break;
4576 case MCK__35_24:
4577 ExpectedVal = 24;
4578 break;
4579 case MCK__35_3:
4580 ExpectedVal = 3;
4581 break;
4582 case MCK__35_32:
4583 ExpectedVal = 32;
4584 break;
4585 case MCK__35_4:
4586 ExpectedVal = 4;
4587 break;
4588 case MCK__35_48:
4589 ExpectedVal = 48;
4590 break;
4591 case MCK__35_6:
4592 ExpectedVal = 6;
4593 break;
4594 case MCK__35_64:
4595 ExpectedVal = 64;
4596 break;
4597 case MCK__35_8:
4598 ExpectedVal = 8;
4599 break;
4600 }
4601 if (!Op.isImm())
4602 return Match_InvalidOperand;
4603 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4604 if (!CE)
4605 return Match_InvalidOperand;
4606 if (CE->getValue() == ExpectedVal)
4607 return Match_Success;
4608 return Match_InvalidOperand;
4609}
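(A note on the generated names used above: the matcher mangles non-identifier characters of literal tokens into their ASCII codes, and '#' is ASCII 35, so MCK__35_12, for example, denotes the match class for the literal operand token "#12".)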
4610
4611OperandMatchResultTy
4612AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4613
4614 SMLoc S = getLoc();
4615
4616 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4617 Error(S, "expected register");
4618 return MatchOperand_ParseFail;
4619 }
4620
4621 int FirstReg = tryParseRegister();
4622 if (FirstReg == -1) {
4623 return MatchOperand_ParseFail;
4624 }
4625 const MCRegisterClass &WRegClass =
4626 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4627 const MCRegisterClass &XRegClass =
4628 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4629
4630 bool isXReg = XRegClass.contains(FirstReg),
4631 isWReg = WRegClass.contains(FirstReg);
4632 if (!isXReg && !isWReg) {
4633 Error(S, "expected first even register of a "
4634 "consecutive same-size even/odd register pair");
4635 return MatchOperand_ParseFail;
4636 }
4637
4638 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4639 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4640
4641 if (FirstEncoding & 0x1) {
4642 Error(S, "expected first even register of a "
4643 "consecutive same-size even/odd register pair");
4644 return MatchOperand_ParseFail;
4645 }
4646
4647 SMLoc M = getLoc();
4648 if (getParser().getTok().isNot(AsmToken::Comma)) {
4649 Error(M, "expected comma");
4650 return MatchOperand_ParseFail;
4651 }
4652 // Eat the comma
4653 getParser().Lex();
4654
4655 SMLoc E = getLoc();
4656 int SecondReg = tryParseRegister();
4657 if (SecondReg ==-1) {
4658 return MatchOperand_ParseFail;
4659 }
4660
4661 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4662 (isXReg && !XRegClass.contains(SecondReg)) ||
4663 (isWReg && !WRegClass.contains(SecondReg))) {
4664 Error(E,"expected second odd register of a "
4665 "consecutive same-size even/odd register pair");
4666 return MatchOperand_ParseFail;
4667 }
4668
4669 unsigned Pair = 0;
4670 if (isXReg) {
4671 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4672 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4673 } else {
4674 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4675 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4676 }
4677
4678 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
4679 getLoc(), getContext()));
4680
4681 return MatchOperand_Success;
4682}
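The pair constraint enforced above reduces to: the first register's hardware encoding must be even, and the second's must be the next encoding in the same register class. A tiny illustrative check on raw encodings, not part of the parser:

#include <cassert>

// A sequential pair is a same-width (even, even + 1) encoding pair,
// e.g. x0/x1 or w30/w31.
static bool isSequentialPair(unsigned FirstEnc, unsigned SecondEnc) {
  return (FirstEnc % 2 == 0) && SecondEnc == FirstEnc + 1;
}

int main() {
  assert(isSequentialPair(0, 1));    // x0, x1 -> valid pair
  assert(!isSequentialPair(1, 2));   // x1, x2 -> first encoding is odd
  assert(!isSequentialPair(2, 4));   // x2, x4 -> not consecutive
  return 0;
}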
4683
4684template <bool ParseSuffix>
4685OperandMatchResultTy
4686AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
4687 const SMLoc S = getLoc();
4688 // Check for a SVE vector register specifier first.
4689 int RegNum = -1;
4690 StringRef Kind;
4691
4692 OperandMatchResultTy Res =
4693 tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
4694
4695 if (Res != MatchOperand_Success)
4696 return Res;
4697
4698 if (ParseSuffix && Kind.empty())
4699 return MatchOperand_NoMatch;
4700
4701 unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
4702 .Case("", -1)
4703 .Case(".b", 8)
4704 .Case(".h", 16)
4705 .Case(".s", 32)
4706 .Case(".d", 64)
4707 .Case(".q", 128)
4708 .Default(0);
4709 if (!ElementWidth)
4710 return MatchOperand_NoMatch;
4711
4712 Operands.push_back(
4713 AArch64Operand::CreateReg(RegNum, RegKind::SVEDataVector, ElementWidth,
4714 S, S, getContext()));
4715
4716 return MatchOperand_Success;
4717}
4718
4719OperandMatchResultTy
4720AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
4721 MCAsmParser &Parser = getParser();
4722
4723 SMLoc SS = getLoc();
4724 const AsmToken &TokE = Parser.getTok();
4725 bool IsHash = TokE.is(AsmToken::Hash);
4726
4727 if (!IsHash && TokE.isNot(AsmToken::Identifier))
4728 return MatchOperand_NoMatch;
4729
4730 int64_t Pattern;
4731 if (IsHash) {
4732 Parser.Lex(); // Eat hash
4733
4734 // Parse the immediate operand.
4735 const MCExpr *ImmVal;
4736 SS = getLoc();
4737 if (Parser.parseExpression(ImmVal))
4738 return MatchOperand_ParseFail;
4739
4740 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4741 if (!MCE)
4742 return MatchOperand_ParseFail;
4743
4744 Pattern = MCE->getValue();
4745 } else {
4746 // Parse the pattern
4747 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
4748 if (!Pat)
4749 return MatchOperand_NoMatch;
4750
4751 Parser.Lex();
4752 Pattern = Pat->Encoding;
4753    assert(Pattern >= 0 && Pattern < 32);
4754 }
4755
4756 Operands.push_back(
4757 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
4758 SS, getLoc(), getContext()));
4759
4760 return MatchOperand_Success;
4761}