Bug Summary

File: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 2986, column 24
1st function call argument is an uninitialized value
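This diagnostic means that, on the path the analyzer reconstructed, the value passed as the first argument of a call at line 2986 was never given a value. As a rough, hypothetical sketch of the pattern (the names below are illustrative only and are not taken from AArch64AsmParser.cpp), the report describes code shaped like this:

#include <cstdio>

// Illustrative only: a value is assigned on one branch, then passed to a call.
static void consume(int ElementWidth) {
  std::printf("element width = %d\n", ElementWidth);
}

static void demo(bool IsVector) {
  int ElementWidth;        // no initializer at the declaration
  if (IsVector)
    ElementWidth = 64;     // assigned only on this branch
  consume(ElementWidth);   // analyzer: 1st function call argument is an
                           // uninitialized value when IsVector is false
}

int main() {
  demo(true);              // the defined path; demo(false) is the flagged one
  return 0;
}

The usual remedy is to initialize the variable (or struct field) at its declaration, or to ensure every path assigns it before the call.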

Annotated Source Code

/build/llvm-toolchain-snapshot-6.0~svn321108/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp

1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "MCTargetDesc/AArch64MCTargetDesc.h"
13#include "MCTargetDesc/AArch64TargetStreamer.h"
14#include "Utils/AArch64BaseInfo.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/StringExtras.h"
21#include "llvm/ADT/StringMap.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/StringSwitch.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/MC/MCContext.h"
26#include "llvm/MC/MCExpr.h"
27#include "llvm/MC/MCInst.h"
28#include "llvm/MC/MCLinkerOptimizationHint.h"
29#include "llvm/MC/MCObjectFileInfo.h"
30#include "llvm/MC/MCParser/MCAsmLexer.h"
31#include "llvm/MC/MCParser/MCAsmParser.h"
32#include "llvm/MC/MCParser/MCAsmParserExtension.h"
33#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34#include "llvm/MC/MCParser/MCTargetAsmParser.h"
35#include "llvm/MC/MCRegisterInfo.h"
36#include "llvm/MC/MCStreamer.h"
37#include "llvm/MC/MCSubtargetInfo.h"
38#include "llvm/MC/MCSymbol.h"
39#include "llvm/MC/MCTargetOptions.h"
40#include "llvm/MC/SubtargetFeature.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/Compiler.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MathExtras.h"
45#include "llvm/Support/SMLoc.h"
46#include "llvm/Support/TargetParser.h"
47#include "llvm/Support/TargetRegistry.h"
48#include "llvm/Support/raw_ostream.h"
49#include <cassert>
50#include <cctype>
51#include <cstdint>
52#include <cstdio>
53#include <string>
54#include <tuple>
55#include <utility>
56#include <vector>
57
58using namespace llvm;
59
60namespace {
61
62enum class RegKind {Scalar, NeonVector, SVEDataVector};
63
64class AArch64AsmParser : public MCTargetAsmParser {
65private:
66 StringRef Mnemonic; ///< Instruction mnemonic.
67
68 // Map of register aliases registered via the .req directive.
69 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
70
71 AArch64TargetStreamer &getTargetStreamer() {
72 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
73 return static_cast<AArch64TargetStreamer &>(TS);
74 }
75
76 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
77
78 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
79 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
80 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
81 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
82 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
83 int tryParseRegister();
84 int tryMatchVectorRegister(StringRef &Kind, bool expected);
85 bool parseRegister(OperandVector &Operands);
86 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
87 bool parseVectorList(OperandVector &Operands);
88 bool parseOperand(OperandVector &Operands, bool isCondCode,
89 bool invertCondCode);
90
91 bool showMatchError(SMLoc Loc, unsigned ErrCode, OperandVector &Operands);
92
93 bool parseDirectiveArch(SMLoc L);
94 bool parseDirectiveCPU(SMLoc L);
95 bool parseDirectiveWord(unsigned Size, SMLoc L);
96 bool parseDirectiveInst(SMLoc L);
97
98 bool parseDirectiveTLSDescCall(SMLoc L);
99
100 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
101 bool parseDirectiveLtorg(SMLoc L);
102
103 bool parseDirectiveReq(StringRef Name, SMLoc L);
104 bool parseDirectiveUnreq(SMLoc L);
105
106 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
107 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
108 OperandVector &Operands, MCStreamer &Out,
109 uint64_t &ErrorInfo,
110 bool MatchingInlineAsm) override;
111/// @name Auto-generated Match Functions
112/// {
113
114#define GET_ASSEMBLER_HEADER
115#include "AArch64GenAsmMatcher.inc"
116
117 /// }
118
119 OperandMatchResultTy tryParseSVERegister(int &Reg, StringRef &Kind,
120 RegKind MatchKind);
121 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
122 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
123 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
124 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
125 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
126 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
127 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
128 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
129 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
130 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
131 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
132 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
133 bool tryParseNeonVectorRegister(OperandVector &Operands);
134 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
135 template <bool ParseSuffix>
136 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
137
138public:
139 enum AArch64MatchResultTy {
140 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
141#define GET_OPERAND_DIAGNOSTIC_TYPES
142#include "AArch64GenAsmMatcher.inc"
143 };
144 bool IsILP32;
145
146 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
147 const MCInstrInfo &MII, const MCTargetOptions &Options)
148 : MCTargetAsmParser(Options, STI, MII) {
149 IsILP32 = Options.getABIName() == "ilp32";
150 MCAsmParserExtension::Initialize(Parser);
151 MCStreamer &S = getParser().getStreamer();
152 if (S.getTargetStreamer() == nullptr)
153 new AArch64TargetStreamer(S);
154
155 // Initialize the set of available features.
156 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
157 }
158
159 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
160 SMLoc NameLoc, OperandVector &Operands) override;
161 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
162 bool ParseDirective(AsmToken DirectiveID) override;
163 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
164 unsigned Kind) override;
165
166 static bool classifySymbolRef(const MCExpr *Expr,
167 AArch64MCExpr::VariantKind &ELFRefKind,
168 MCSymbolRefExpr::VariantKind &DarwinRefKind,
169 int64_t &Addend);
170};
171
172/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
173/// instruction.
174class AArch64Operand : public MCParsedAsmOperand {
175private:
176 enum KindTy {
177 k_Immediate,
178 k_ShiftedImm,
179 k_CondCode,
180 k_Register,
181 k_VectorList,
182 k_VectorIndex,
183 k_Token,
184 k_SysReg,
185 k_SysCR,
186 k_Prefetch,
187 k_ShiftExtend,
188 k_FPImm,
189 k_Barrier,
190 k_PSBHint,
191 } Kind;
192
193 SMLoc StartLoc, EndLoc;
194
195 struct TokOp {
196 const char *Data;
197 unsigned Length;
198 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
199 };
200
201 struct RegOp {
202 unsigned RegNum;
203 RegKind Kind;
204
205 int ElementWidth;
206 };
207
208 struct VectorListOp {
209 unsigned RegNum;
210 unsigned Count;
211 unsigned NumElements;
212 unsigned ElementKind;
213 };
214
215 struct VectorIndexOp {
216 unsigned Val;
217 };
218
219 struct ImmOp {
220 const MCExpr *Val;
221 };
222
223 struct ShiftedImmOp {
224 const MCExpr *Val;
225 unsigned ShiftAmount;
226 };
227
228 struct CondCodeOp {
229 AArch64CC::CondCode Code;
230 };
231
232 struct FPImmOp {
233 unsigned Val; // Encoded 8-bit representation.
234 };
235
236 struct BarrierOp {
237 const char *Data;
238 unsigned Length;
239 unsigned Val; // Not the enum since not all values have names.
240 };
241
242 struct SysRegOp {
243 const char *Data;
244 unsigned Length;
245 uint32_t MRSReg;
246 uint32_t MSRReg;
247 uint32_t PStateField;
248 };
249
250 struct SysCRImmOp {
251 unsigned Val;
252 };
253
254 struct PrefetchOp {
255 const char *Data;
256 unsigned Length;
257 unsigned Val;
258 };
259
260 struct PSBHintOp {
261 const char *Data;
262 unsigned Length;
263 unsigned Val;
264 };
265
266 struct ShiftExtendOp {
267 AArch64_AM::ShiftExtendType Type;
268 unsigned Amount;
269 bool HasExplicitAmount;
270 };
271
272 struct ExtendOp {
273 unsigned Val;
274 };
275
276 union {
277 struct TokOp Tok;
278 struct RegOp Reg;
279 struct VectorListOp VectorList;
280 struct VectorIndexOp VectorIndex;
281 struct ImmOp Imm;
282 struct ShiftedImmOp ShiftedImm;
283 struct CondCodeOp CondCode;
284 struct FPImmOp FPImm;
285 struct BarrierOp Barrier;
286 struct SysRegOp SysReg;
287 struct SysCRImmOp SysCRImm;
288 struct PrefetchOp Prefetch;
289 struct PSBHintOp PSBHint;
290 struct ShiftExtendOp ShiftExtend;
291 };
292
293 // Keep the MCContext around as the MCExprs may need to be manipulated during
294 // the add<>Operands() calls.
295 MCContext &Ctx;
296
297public:
298 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
299
300 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
301 Kind = o.Kind;
302 StartLoc = o.StartLoc;
303 EndLoc = o.EndLoc;
304 switch (Kind) {
305 case k_Token:
306 Tok = o.Tok;
307 break;
308 case k_Immediate:
309 Imm = o.Imm;
310 break;
311 case k_ShiftedImm:
312 ShiftedImm = o.ShiftedImm;
313 break;
314 case k_CondCode:
315 CondCode = o.CondCode;
316 break;
317 case k_FPImm:
318 FPImm = o.FPImm;
319 break;
320 case k_Barrier:
321 Barrier = o.Barrier;
322 break;
323 case k_Register:
324 Reg = o.Reg;
325 break;
326 case k_VectorList:
327 VectorList = o.VectorList;
328 break;
329 case k_VectorIndex:
330 VectorIndex = o.VectorIndex;
331 break;
332 case k_SysReg:
333 SysReg = o.SysReg;
334 break;
335 case k_SysCR:
336 SysCRImm = o.SysCRImm;
337 break;
338 case k_Prefetch:
339 Prefetch = o.Prefetch;
340 break;
341 case k_PSBHint:
342 PSBHint = o.PSBHint;
343 break;
344 case k_ShiftExtend:
345 ShiftExtend = o.ShiftExtend;
346 break;
347 }
348 }
349
350 /// getStartLoc - Get the location of the first token of this operand.
351 SMLoc getStartLoc() const override { return StartLoc; }
352 /// getEndLoc - Get the location of the last token of this operand.
353 SMLoc getEndLoc() const override { return EndLoc; }
354
355 StringRef getToken() const {
356 assert(Kind == k_Token && "Invalid access!");
357 return StringRef(Tok.Data, Tok.Length);
358 }
359
360 bool isTokenSuffix() const {
361 assert(Kind == k_Token && "Invalid access!");
362 return Tok.IsSuffix;
363 }
364
365 const MCExpr *getImm() const {
366 assert(Kind == k_Immediate && "Invalid access!");
367 return Imm.Val;
368 }
369
370 const MCExpr *getShiftedImmVal() const {
371 assert(Kind == k_ShiftedImm && "Invalid access!");
372 return ShiftedImm.Val;
373 }
374
375 unsigned getShiftedImmShift() const {
376 assert(Kind == k_ShiftedImm && "Invalid access!");
377 return ShiftedImm.ShiftAmount;
378 }
379
380 AArch64CC::CondCode getCondCode() const {
381 assert(Kind == k_CondCode && "Invalid access!");
382 return CondCode.Code;
383 }
384
385 unsigned getFPImm() const {
386 assert(Kind == k_FPImm && "Invalid access!");
387 return FPImm.Val;
388 }
389
390 unsigned getBarrier() const {
391 assert(Kind == k_Barrier && "Invalid access!");
392 return Barrier.Val;
393 }
394
395 StringRef getBarrierName() const {
396 assert(Kind == k_Barrier && "Invalid access!");
397 return StringRef(Barrier.Data, Barrier.Length);
398 }
399
400 unsigned getReg() const override {
401 assert(Kind == k_Register && "Invalid access!");
402 return Reg.RegNum;
403 }
404
405 unsigned getVectorListStart() const {
406 assert(Kind == k_VectorList && "Invalid access!");
407 return VectorList.RegNum;
408 }
409
410 unsigned getVectorListCount() const {
411 assert(Kind == k_VectorList && "Invalid access!");
412 return VectorList.Count;
413 }
414
415 unsigned getVectorIndex() const {
416 assert(Kind == k_VectorIndex && "Invalid access!");
417 return VectorIndex.Val;
418 }
419
420 StringRef getSysReg() const {
421 assert(Kind == k_SysReg && "Invalid access!");
422 return StringRef(SysReg.Data, SysReg.Length);
423 }
424
425 unsigned getSysCR() const {
426 assert(Kind == k_SysCR && "Invalid access!");
427 return SysCRImm.Val;
428 }
429
430 unsigned getPrefetch() const {
431 assert(Kind == k_Prefetch && "Invalid access!");
432 return Prefetch.Val;
433 }
434
435 unsigned getPSBHint() const {
436 assert(Kind == k_PSBHint && "Invalid access!");
437 return PSBHint.Val;
438 }
439
440 StringRef getPSBHintName() const {
441 assert(Kind == k_PSBHint && "Invalid access!");
442 return StringRef(PSBHint.Data, PSBHint.Length);
443 }
444
445 StringRef getPrefetchName() const {
446 assert(Kind == k_Prefetch && "Invalid access!");
447 return StringRef(Prefetch.Data, Prefetch.Length);
448 }
449
450 AArch64_AM::ShiftExtendType getShiftExtendType() const {
451 assert(Kind == k_ShiftExtend && "Invalid access!");
452 return ShiftExtend.Type;
453 }
454
455 unsigned getShiftExtendAmount() const {
456 assert(Kind == k_ShiftExtend && "Invalid access!");
457 return ShiftExtend.Amount;
458 }
459
460 bool hasShiftExtendAmount() const {
461 assert(Kind == k_ShiftExtend && "Invalid access!");
462 return ShiftExtend.HasExplicitAmount;
463 }
464
465 bool isImm() const override { return Kind == k_Immediate; }
466 bool isMem() const override { return false; }
467 bool isSImm9() const {
468 if (!isImm())
469 return false;
470 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
471 if (!MCE)
472 return false;
473 int64_t Val = MCE->getValue();
474 return (Val >= -256 && Val < 256);
475 }
476 bool isSImm10s8() const {
477 if (!isImm())
478 return false;
479 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
480 if (!MCE)
481 return false;
482 int64_t Val = MCE->getValue();
483 return (Val >= -4096 && Val < 4089 && (Val & 7) == 0);
484 }
485 bool isSImm7s4() const {
486 if (!isImm())
487 return false;
488 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
489 if (!MCE)
490 return false;
491 int64_t Val = MCE->getValue();
492 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
493 }
494 bool isSImm7s8() const {
495 if (!isImm())
496 return false;
497 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
498 if (!MCE)
499 return false;
500 int64_t Val = MCE->getValue();
501 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
502 }
503 bool isSImm7s16() const {
504 if (!isImm())
505 return false;
506 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
507 if (!MCE)
508 return false;
509 int64_t Val = MCE->getValue();
510 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
511 }
512
513 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
514 AArch64MCExpr::VariantKind ELFRefKind;
515 MCSymbolRefExpr::VariantKind DarwinRefKind;
516 int64_t Addend;
517 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
518 Addend)) {
519 // If we don't understand the expression, assume the best and
520 // let the fixup and relocation code deal with it.
521 return true;
522 }
523
524 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
525 ELFRefKind == AArch64MCExpr::VK_LO12 ||
526 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
527 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
528 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
529 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
530 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
531 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
532 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
533 // Note that we don't range-check the addend. It's adjusted modulo page
534 // size when converted, so there is no "out of range" condition when using
535 // @pageoff.
536 return Addend >= 0 && (Addend % Scale) == 0;
537 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
538 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
539 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
540 return Addend == 0;
541 }
542
543 return false;
544 }
545
546 template <int Scale> bool isUImm12Offset() const {
547 if (!isImm())
548 return false;
549
550 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
551 if (!MCE)
552 return isSymbolicUImm12Offset(getImm(), Scale);
553
554 int64_t Val = MCE->getValue();
555 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
556 }
557
558 template <int N, int M>
559 bool isImmInRange() const {
560 if (!isImm())
561 return false;
562 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
563 if (!MCE)
564 return false;
565 int64_t Val = MCE->getValue();
566 return (Val >= N && Val <= M);
567 }
568
569 bool isLogicalImm32() const {
570 if (!isImm())
571 return false;
572 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
573 if (!MCE)
574 return false;
575 int64_t Val = MCE->getValue();
576 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
577 return false;
578 Val &= 0xFFFFFFFF;
579 return AArch64_AM::isLogicalImmediate(Val, 32);
580 }
581
582 bool isLogicalImm64() const {
583 if (!isImm())
584 return false;
585 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 if (!MCE)
587 return false;
588 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
589 }
590
591 bool isLogicalImm32Not() const {
592 if (!isImm())
593 return false;
594 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 if (!MCE)
596 return false;
597 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
598 return AArch64_AM::isLogicalImmediate(Val, 32);
599 }
600
601 bool isLogicalImm64Not() const {
602 if (!isImm())
603 return false;
604 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
605 if (!MCE)
606 return false;
607 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
608 }
609
610 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
611
612 bool isAddSubImm() const {
613 if (!isShiftedImm() && !isImm())
614 return false;
615
616 const MCExpr *Expr;
617
618 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
619 if (isShiftedImm()) {
620 unsigned Shift = ShiftedImm.ShiftAmount;
621 Expr = ShiftedImm.Val;
622 if (Shift != 0 && Shift != 12)
623 return false;
624 } else {
625 Expr = getImm();
626 }
627
628 AArch64MCExpr::VariantKind ELFRefKind;
629 MCSymbolRefExpr::VariantKind DarwinRefKind;
630 int64_t Addend;
631 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
632 DarwinRefKind, Addend)) {
633 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
634 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
635 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
636 || ELFRefKind == AArch64MCExpr::VK_LO12
637 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
638 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
639 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
640 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
641 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
642 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
643 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
644 }
645
646 // If it's a constant, it should be a real immediate in range:
647 if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
648 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
649
650 // If it's an expression, we hope for the best and let the fixup/relocation
651 // code deal with it.
652 return true;
653 }
654
655 bool isAddSubImmNeg() const {
656 if (!isShiftedImm() && !isImm())
657 return false;
658
659 const MCExpr *Expr;
660
661 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
662 if (isShiftedImm()) {
663 unsigned Shift = ShiftedImm.ShiftAmount;
664 Expr = ShiftedImm.Val;
665 if (Shift != 0 && Shift != 12)
666 return false;
667 } else
668 Expr = getImm();
669
670 // Otherwise it should be a real negative immediate in range:
671 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
672 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
673 }
674
675 bool isCondCode() const { return Kind == k_CondCode; }
676
677 bool isSIMDImmType10() const {
678 if (!isImm())
679 return false;
680 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
681 if (!MCE)
682 return false;
683 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
684 }
685
686 template<int N>
687 bool isBranchTarget() const {
688 if (!isImm())
689 return false;
690 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
691 if (!MCE)
692 return true;
693 int64_t Val = MCE->getValue();
694 if (Val & 0x3)
695 return false;
696 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
697 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
698 }
699
700 bool
701 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
702 if (!isImm())
703 return false;
704
705 AArch64MCExpr::VariantKind ELFRefKind;
706 MCSymbolRefExpr::VariantKind DarwinRefKind;
707 int64_t Addend;
708 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
709 DarwinRefKind, Addend)) {
710 return false;
711 }
712 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
713 return false;
714
715 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
716 if (ELFRefKind == AllowedModifiers[i])
717 return Addend == 0;
718 }
719
720 return false;
721 }
722
723 bool isMovZSymbolG3() const {
724 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
725 }
726
727 bool isMovZSymbolG2() const {
728 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
729 AArch64MCExpr::VK_TPREL_G2,
730 AArch64MCExpr::VK_DTPREL_G2});
731 }
732
733 bool isMovZSymbolG1() const {
734 return isMovWSymbol({
735 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
736 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
737 AArch64MCExpr::VK_DTPREL_G1,
738 });
739 }
740
741 bool isMovZSymbolG0() const {
742 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
743 AArch64MCExpr::VK_TPREL_G0,
744 AArch64MCExpr::VK_DTPREL_G0});
745 }
746
747 bool isMovKSymbolG3() const {
748 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
749 }
750
751 bool isMovKSymbolG2() const {
752 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
753 }
754
755 bool isMovKSymbolG1() const {
756 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
757 AArch64MCExpr::VK_TPREL_G1_NC,
758 AArch64MCExpr::VK_DTPREL_G1_NC});
759 }
760
761 bool isMovKSymbolG0() const {
762 return isMovWSymbol(
763 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
764 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
765 }
766
767 template<int RegWidth, int Shift>
768 bool isMOVZMovAlias() const {
769 if (!isImm()) return false;
770
771 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772 if (!CE) return false;
773 uint64_t Value = CE->getValue();
774
775 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
776 }
777
778 template<int RegWidth, int Shift>
779 bool isMOVNMovAlias() const {
780 if (!isImm()) return false;
781
782 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783 if (!CE) return false;
784 uint64_t Value = CE->getValue();
785
786 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
787 }
788
789 bool isFPImm() const { return Kind == k_FPImm; }
790 bool isBarrier() const { return Kind == k_Barrier; }
791 bool isSysReg() const { return Kind == k_SysReg; }
792
793 bool isMRSSystemRegister() const {
794 if (!isSysReg()) return false;
795
796 return SysReg.MRSReg != -1U;
797 }
798
799 bool isMSRSystemRegister() const {
800 if (!isSysReg()) return false;
801 return SysReg.MSRReg != -1U;
802 }
803
804 bool isSystemPStateFieldWithImm0_1() const {
805 if (!isSysReg()) return false;
806 return (SysReg.PStateField == AArch64PState::PAN ||
807 SysReg.PStateField == AArch64PState::UAO);
808 }
809
810 bool isSystemPStateFieldWithImm0_15() const {
811 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
812 return SysReg.PStateField != -1U;
813 }
814
815 bool isReg() const override {
816 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
817 }
818
819 bool isNeonVectorReg() const {
820 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
821 }
822
823 bool isNeonVectorRegLo() const {
824 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
825 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
826 Reg.RegNum);
827 }
828
829 template <unsigned Class = AArch64::ZPRRegClassID>
830 bool isSVEDataVectorReg() const {
831 return (Kind == k_Register && Reg.Kind == RegKind::SVEDataVector) &&
832 AArch64MCRegisterClasses[Class].contains(getReg());
833 }
834
835 template <int ElementWidth> bool isSVEDataVectorRegOfWidth() const {
836 return isSVEDataVectorReg() &&
837 (ElementWidth == -1 || Reg.ElementWidth == ElementWidth);
838 }
839
840 bool isGPR32as64() const {
841 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
842 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
843 }
844
845 bool isWSeqPair() const {
846 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
847 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
848 Reg.RegNum);
849 }
850
851 bool isXSeqPair() const {
852 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
853 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
854 Reg.RegNum);
855 }
856
857 bool isGPR64sp0() const {
858 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
859 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
860 }
861
862 template<int64_t Angle, int64_t Remainder>
863 bool isComplexRotation() const {
864 if (!isImm()) return false;
865
866 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
867 if (!CE) return false;
868 uint64_t Value = CE->getValue();
869
870 return (Value % Angle == Remainder && Value <= 270);
871 }
872
873 /// Is this a vector list with the type implicit (presumably attached to the
874 /// instruction itself)?
875 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
876 return Kind == k_VectorList && VectorList.Count == NumRegs &&
877 !VectorList.ElementKind;
878 }
879
880 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
881 bool isTypedVectorList() const {
882 if (Kind != k_VectorList)
883 return false;
884 if (VectorList.Count != NumRegs)
885 return false;
886 if (VectorList.ElementKind != ElementKind)
887 return false;
888 return VectorList.NumElements == NumElements;
889 }
890
891 bool isVectorIndex1() const {
892 return Kind == k_VectorIndex && VectorIndex.Val == 1;
893 }
894
895 bool isVectorIndexB() const {
896 return Kind == k_VectorIndex && VectorIndex.Val < 16;
897 }
898
899 bool isVectorIndexH() const {
900 return Kind == k_VectorIndex && VectorIndex.Val < 8;
901 }
902
903 bool isVectorIndexS() const {
904 return Kind == k_VectorIndex && VectorIndex.Val < 4;
905 }
906
907 bool isVectorIndexD() const {
908 return Kind == k_VectorIndex && VectorIndex.Val < 2;
909 }
910
911 bool isToken() const override { return Kind == k_Token; }
912
913 bool isTokenEqual(StringRef Str) const {
914 return Kind == k_Token && getToken() == Str;
915 }
916 bool isSysCR() const { return Kind == k_SysCR; }
917 bool isPrefetch() const { return Kind == k_Prefetch; }
918 bool isPSBHint() const { return Kind == k_PSBHint; }
919 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
920 bool isShifter() const {
921 if (!isShiftExtend())
922 return false;
923
924 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
925 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
926 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
927 ST == AArch64_AM::MSL);
928 }
929 bool isExtend() const {
930 if (!isShiftExtend())
931 return false;
932
933 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
934 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
935 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
936 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
937 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
938 ET == AArch64_AM::LSL) &&
939 getShiftExtendAmount() <= 4;
940 }
941
942 bool isExtend64() const {
943 if (!isExtend())
944 return false;
945 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
946 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
947 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
948 }
949
950 bool isExtendLSL64() const {
951 if (!isExtend())
952 return false;
953 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
954 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
955 ET == AArch64_AM::LSL) &&
956 getShiftExtendAmount() <= 4;
957 }
958
959 template<int Width> bool isMemXExtend() const {
960 if (!isExtend())
961 return false;
962 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
963 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
964 (getShiftExtendAmount() == Log2_32(Width / 8) ||
965 getShiftExtendAmount() == 0);
966 }
967
968 template<int Width> bool isMemWExtend() const {
969 if (!isExtend())
970 return false;
971 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
972 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
973 (getShiftExtendAmount() == Log2_32(Width / 8) ||
974 getShiftExtendAmount() == 0);
975 }
976
977 template <unsigned width>
978 bool isArithmeticShifter() const {
979 if (!isShifter())
980 return false;
981
982 // An arithmetic shifter is LSL, LSR, or ASR.
983 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
984 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
985 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
986 }
987
988 template <unsigned width>
989 bool isLogicalShifter() const {
990 if (!isShifter())
991 return false;
992
993 // A logical shifter is LSL, LSR, ASR or ROR.
994 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
995 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
996 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
997 getShiftExtendAmount() < width;
998 }
999
1000 bool isMovImm32Shifter() const {
1001 if (!isShifter())
1002 return false;
1003
1004 // A 32-bit MOVi shifter is LSL of 0 or 16.
1005 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1006 if (ST != AArch64_AM::LSL)
1007 return false;
1008 uint64_t Val = getShiftExtendAmount();
1009 return (Val == 0 || Val == 16);
1010 }
1011
1012 bool isMovImm64Shifter() const {
1013 if (!isShifter())
1014 return false;
1015
1016 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1017 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1018 if (ST != AArch64_AM::LSL)
1019 return false;
1020 uint64_t Val = getShiftExtendAmount();
1021 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1022 }
1023
1024 bool isLogicalVecShifter() const {
1025 if (!isShifter())
1026 return false;
1027
1028 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1029 unsigned Shift = getShiftExtendAmount();
1030 return getShiftExtendType() == AArch64_AM::LSL &&
1031 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1032 }
1033
1034 bool isLogicalVecHalfWordShifter() const {
1035 if (!isLogicalVecShifter())
1036 return false;
1037
1038 // A logical vector half-word shifter is a left shift by 0 or 8.
1039 unsigned Shift = getShiftExtendAmount();
1040 return getShiftExtendType() == AArch64_AM::LSL &&
1041 (Shift == 0 || Shift == 8);
1042 }
1043
1044 bool isMoveVecShifter() const {
1045 if (!isShiftExtend())
1046 return false;
1047
1048 // A move vector shifter is an MSL left shift by 8 or 16.
1049 unsigned Shift = getShiftExtendAmount();
1050 return getShiftExtendType() == AArch64_AM::MSL &&
1051 (Shift == 8 || Shift == 16);
1052 }
1053
1054 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1055 // to LDUR/STUR when the offset is not legal for the former but is for
1056 // the latter. As such, in addition to checking for being a legal unscaled
1057 // address, also check that it is not a legal scaled address. This avoids
1058 // ambiguity in the matcher.
1059 template<int Width>
1060 bool isSImm9OffsetFB() const {
1061 return isSImm9() && !isUImm12Offset<Width / 8>();
1062 }
1063
1064 bool isAdrpLabel() const {
1065 // Validation was handled during parsing, so we just sanity check that
1066 // something didn't go haywire.
1067 if (!isImm())
1068 return false;
1069
1070 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1071 int64_t Val = CE->getValue();
1072 int64_t Min = - (4096 * (1LL << (21 - 1)));
1073 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1074 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1075 }
1076
1077 return true;
1078 }
1079
1080 bool isAdrLabel() const {
1081 // Validation was handled during parsing, so we just sanity check that
1082 // something didn't go haywire.
1083 if (!isImm())
1084 return false;
1085
1086 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1087 int64_t Val = CE->getValue();
1088 int64_t Min = - (1LL << (21 - 1));
1089 int64_t Max = ((1LL << (21 - 1)) - 1);
1090 return Val >= Min && Val <= Max;
1091 }
1092
1093 return true;
1094 }
1095
1096 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1097 // Add as immediates when possible. Null MCExpr = 0.
1098 if (!Expr)
1099 Inst.addOperand(MCOperand::createImm(0));
1100 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1101 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1102 else
1103 Inst.addOperand(MCOperand::createExpr(Expr));
1104 }
1105
1106 void addRegOperands(MCInst &Inst, unsigned N) const {
1107 assert(N == 1 && "Invalid number of operands!");
1108 Inst.addOperand(MCOperand::createReg(getReg()));
1109 }
1110
1111 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1112 assert(N == 1 && "Invalid number of operands!");
1113 assert(
1114 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1115
1116 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1117 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1118 RI->getEncodingValue(getReg()));
1119
1120 Inst.addOperand(MCOperand::createReg(Reg));
1121 }
1122
1123 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1124 assert(N == 1 && "Invalid number of operands!");
1125 assert(
1126 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1127 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1128 }
1129
1130 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1131 assert(N == 1 && "Invalid number of operands!");
1132 assert(
1133 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1134 Inst.addOperand(MCOperand::createReg(getReg()));
1135 }
1136
1137 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1138 assert(N == 1 && "Invalid number of operands!");
1139 Inst.addOperand(MCOperand::createReg(getReg()));
1140 }
1141
1142 template <unsigned NumRegs>
1143 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1144 assert(N == 1 && "Invalid number of operands!");
1145 static const unsigned FirstRegs[] = { AArch64::D0,
1146 AArch64::D0_D1,
1147 AArch64::D0_D1_D2,
1148 AArch64::D0_D1_D2_D3 };
1149 unsigned FirstReg = FirstRegs[NumRegs - 1];
1150
1151 Inst.addOperand(
1152 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1153 }
1154
1155 template <unsigned NumRegs>
1156 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
1158 static const unsigned FirstRegs[] = { AArch64::Q0,
1159 AArch64::Q0_Q1,
1160 AArch64::Q0_Q1_Q2,
1161 AArch64::Q0_Q1_Q2_Q3 };
1162 unsigned FirstReg = FirstRegs[NumRegs - 1];
1163
1164 Inst.addOperand(
1165 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1166 }
1167
1168 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1169 assert(N == 1 && "Invalid number of operands!");
1170 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1171 }
1172
1173 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1174 assert(N == 1 && "Invalid number of operands!");
1175 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1176 }
1177
1178 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1179 assert(N == 1 && "Invalid number of operands!");
1180 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1181 }
1182
1183 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1184 assert(N == 1 && "Invalid number of operands!");
1185 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1186 }
1187
1188 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1189 assert(N == 1 && "Invalid number of operands!");
1190 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1191 }
1192
1193 void addImmOperands(MCInst &Inst, unsigned N) const {
1194 assert(N == 1 && "Invalid number of operands!");
1195 // If this is a pageoff symrefexpr with an addend, adjust the addend
1196 // to be only the page-offset portion. Otherwise, just add the expr
1197 // as-is.
1198 addExpr(Inst, getImm());
1199 }
1200
1201 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1202 assert(N == 2 && "Invalid number of operands!");
1203 if (isShiftedImm()) {
1204 addExpr(Inst, getShiftedImmVal());
1205 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1206 } else {
1207 addExpr(Inst, getImm());
1208 Inst.addOperand(MCOperand::createImm(0));
1209 }
1210 }
1211
1212 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1213 assert(N == 2 && "Invalid number of operands!");
1214
1215 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1216 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1217 int64_t Val = -CE->getValue();
1218 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1219
1220 Inst.addOperand(MCOperand::createImm(Val));
1221 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1222 }
1223
1224 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1225 assert(N == 1 && "Invalid number of operands!");
1226 Inst.addOperand(MCOperand::createImm(getCondCode()));
1227 }
1228
1229 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1230 assert(N == 1 && "Invalid number of operands!");
1231 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1232 if (!MCE)
1233 addExpr(Inst, getImm());
1234 else
1235 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1236 }
1237
1238 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1239 addImmOperands(Inst, N);
1240 }
1241
1242 template<int Scale>
1243 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1244 assert(N == 1 && "Invalid number of operands!");
1245 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1246
1247 if (!MCE) {
1248 Inst.addOperand(MCOperand::createExpr(getImm()));
1249 return;
1250 }
1251 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1252 }
1253
1254 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1255 assert(N == 1 && "Invalid number of operands!");
1256 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1257 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1258 }
1259
1260 void addSImm10s8Operands(MCInst &Inst, unsigned N) const {
1261 assert(N == 1 && "Invalid number of operands!");
1262 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1263 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1264 }
1265
1266 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1267 assert(N == 1 && "Invalid number of operands!");
1268 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1269 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1270 }
1271
1272 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1275 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1276 }
1277
1278 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1279 assert(N == 1 && "Invalid number of operands!");
1280 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1281 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1282 }
1283
1284 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1285 assert(N == 1 && "Invalid number of operands!");
1286 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1287 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1288 }
1289
1290 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1291 assert(N == 1 && "Invalid number of operands!");
1292 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1293 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1294 }
1295
1296 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1299 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1300 }
1301
1302 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1303 assert(N == 1 && "Invalid number of operands!");
1304 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1305 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1306 }
1307
1308 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1309 assert(N == 1 && "Invalid number of operands!");
1310 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1311 assert(MCE && "Invalid constant immediate operand!");
1312 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313 }
1314
1315 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1316 assert(N == 1 && "Invalid number of operands!");
1317 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1318 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1319 }
1320
1321 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1323 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1324 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1325 }
1326
1327 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1328 assert(N == 1 && "Invalid number of operands!");
1329 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1330 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1331 }
1332
1333 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1337 }
1338
1339 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1340 assert(N == 1 && "Invalid number of operands!");
1341 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1342 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1343 }
1344
1345 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1348 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1349 }
1350
1351 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1352 assert(N == 1 && "Invalid number of operands!");
1353 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1354 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1355 }
1356
1357 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1361 }
1362
1363 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1364 assert(N == 1 && "Invalid number of operands!");
1365 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1366 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1367 }
1368
1369 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1370 assert(N == 1 && "Invalid number of operands!");
1371 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1372 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1373 }
1374
1375 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1376 assert(N == 1 && "Invalid number of operands!");
1377 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1378 uint64_t encoding =
1379 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1380 Inst.addOperand(MCOperand::createImm(encoding));
1381 }
1382
1383 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1384 assert(N == 1 && "Invalid number of operands!");
1385 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1386 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1387 Inst.addOperand(MCOperand::createImm(encoding));
1388 }
1389
1390 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1391 assert(N == 1 && "Invalid number of operands!");
1392 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1393 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1394 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1395 Inst.addOperand(MCOperand::createImm(encoding));
1396 }
1397
1398 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1399 assert(N == 1 && "Invalid number of operands!");
1400 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1401 uint64_t encoding =
1402 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1403 Inst.addOperand(MCOperand::createImm(encoding));
1404 }
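// Illustrative note (an added worked example, not in the original source):
// the "Not" variants above complement the parsed value before re-encoding
// it, so a hypothetical 32-bit operand value of 0xFFFFFFF0 becomes
// ~0xFFFFFFF0 & 0xFFFFFFFF = 0x0000000F, which encodeLogicalImmediate()
// then converts to the bitmask-immediate encoding used by the logical
// instructions.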
1405
1406 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1407 assert(N == 1 && "Invalid number of operands!");
1408 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1409 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1410 Inst.addOperand(MCOperand::createImm(encoding));
1411 }
1412
1413 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1414 // Branch operands don't encode the low bits, so shift them off
1415 // here. If it's a label, however, just put it on directly as there's
1416 // not enough information now to do anything.
1417 assert(N == 1 && "Invalid number of operands!");
1418 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1419 if (!MCE) {
1420 addExpr(Inst, getImm());
1421 return;
1422 }
1423 assert(MCE && "Invalid constant immediate operand!");
1424 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1425 }
1426
1427 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1428 // Branch operands don't encode the low bits, so shift them off
1429 // here. If it's a label, however, just put it on directly as there's
1430 // not enough information now to do anything.
1431 assert(N == 1 && "Invalid number of operands!");
1432 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1433 if (!MCE) {
1434 addExpr(Inst, getImm());
1435 return;
1436 }
1437 assert(MCE && "Invalid constant immediate operand!");
1438 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1439 }
1440
1441 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1442 // Branch operands don't encode the low bits, so shift them off
1443 // here. If it's a label, however, just put it on directly as there's
1444 // not enough information now to do anything.
1445 assert(N == 1 && "Invalid number of operands!");
1446 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1447 if (!MCE) {
1448 addExpr(Inst, getImm());
1449 return;
1450 }
1451 assert(MCE && "Invalid constant immediate operand!");
1452 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1453 }
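// Illustrative note (an added worked example, not in the original source):
// for a constant target the low two bits are dropped because AArch64
// branch offsets are counted in 4-byte instructions, so a hypothetical
// resolved offset of 0x1000 is added as the immediate 0x400 (0x1000 >> 2),
// while a bare label expression is added unchanged and left to the later
// fixup/relocation stage.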
1454
1455 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1456 assert(N == 1 && "Invalid number of operands!");
1457 Inst.addOperand(MCOperand::createImm(getFPImm()));
1458 }
1459
1460 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1461 assert(N == 1 && "Invalid number of operands!");
1462 Inst.addOperand(MCOperand::createImm(getBarrier()));
1463 }
1464
1465 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1466 assert(N == 1 && "Invalid number of operands!");
1467
1468 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1469 }
1470
1471 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1472 assert(N == 1 && "Invalid number of operands!");
1473
1474 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1475 }
1476
1477 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1478 assert(N == 1 && "Invalid number of operands!");
1479
1480 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1481 }
1482
1483 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1484 assert(N == 1 && "Invalid number of operands!");
1485
1486 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1487 }
1488
1489 void addSysCROperands(MCInst &Inst, unsigned N) const {
1490 assert(N == 1 && "Invalid number of operands!");
1491 Inst.addOperand(MCOperand::createImm(getSysCR()));
1492 }
1493
1494 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1495 assert(N == 1 && "Invalid number of operands!");
1496 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1497 }
1498
1499 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1500 assert(N == 1 && "Invalid number of operands!");
1501 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1502 }
1503
1504 void addShifterOperands(MCInst &Inst, unsigned N) const {
1505 assert(N == 1 && "Invalid number of operands!");
1506 unsigned Imm =
1507 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1508 Inst.addOperand(MCOperand::createImm(Imm));
1509 }
1510
1511 void addExtendOperands(MCInst &Inst, unsigned N) const {
1512 assert(N == 1 && "Invalid number of operands!");
1513 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1514 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1515 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1516 Inst.addOperand(MCOperand::createImm(Imm));
1517 }
1518
1519 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1520 assert(N == 1 && "Invalid number of operands!");
1521 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1522 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1523 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1524 Inst.addOperand(MCOperand::createImm(Imm));
1525 }
1526
1527 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1528 assert(N == 2 && "Invalid number of operands!");
1529 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1530 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1531 Inst.addOperand(MCOperand::createImm(IsSigned));
1532 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1533 }
1534
1535 // For 8-bit load/store instructions with a register offset, both the
1536 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1537 // they're disambiguated by whether the shift was explicit or implicit rather
1538 // than its size.
1539 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1540 assert(N == 2 && "Invalid number of operands!");
1541 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1542 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1543 Inst.addOperand(MCOperand::createImm(IsSigned));
1544 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1545 }
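// Illustrative note (an added worked example, not in the original source):
// for the 8-bit forms the shift amount is always 0, so the second
// immediate records only whether the shift was written out; a hypothetical
// "ldrb w0, [x1, x2, lsl #0]" therefore selects the "DoShift" flavour,
// while "ldrb w0, [x1, x2]" selects "NoShift".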
1546
1547 template<int Shift>
1548 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1549 assert(N == 1 && "Invalid number of operands!");
1550
1551 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1552 uint64_t Value = CE->getValue();
1553 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1554 }
1555
1556 template<int Shift>
1557 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1558 assert(N == 1 && "Invalid number of operands!");
1559
1560 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1561 uint64_t Value = CE->getValue();
1562 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1563 }
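// Illustrative note (an added worked example, not in the original source):
// these templates back the "mov Xd, #imm" aliases. For a hypothetical
// value of 0x0000123400000000 the MOVZ form instantiated with Shift = 32
// emits (Value >> 32) & 0xffff = 0x1234; the MOVN form emits
// (~Value >> Shift) & 0xffff, i.e. the same field taken from the bitwise
// complement of the value.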
1564
1565 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1566 assert(N == 1 && "Invalid number of operands!");
1567 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1568 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1569 }
1570
1571 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1572 assert(N == 1 && "Invalid number of operands!");
1573 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1574 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1575 }
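// Illustrative note (an added worked example, not in the original source):
// the complex-rotation operands arrive as degree counts, so the "even"
// form maps 0/90/180/270 to 0/1/2/3 via value / 90 and the "odd" form
// maps 90/270 to 0/1 via (value - 90) / 180.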
1576
1577 void print(raw_ostream &OS) const override;
1578
1579 static std::unique_ptr<AArch64Operand>
1580 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1581 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1582 Op->Tok.Data = Str.data();
1583 Op->Tok.Length = Str.size();
1584 Op->Tok.IsSuffix = IsSuffix;
1585 Op->StartLoc = S;
1586 Op->EndLoc = S;
1587 return Op;
1588 }
1589
1590 static std::unique_ptr<AArch64Operand>
1591 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx) {
1592 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1593 Op->Reg.RegNum = RegNum;
1594 Op->Reg.Kind = Kind;
1595 Op->StartLoc = S;
1596 Op->EndLoc = E;
1597 return Op;
1598 }
1599
1600 static std::unique_ptr<AArch64Operand>
1601 CreateReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1602 SMLoc S, SMLoc E, MCContext &Ctx) {
1603 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1604 Op->Reg.RegNum = RegNum;
1605 Op->Reg.ElementWidth = ElementWidth;
1606 Op->Reg.Kind = Kind;
1607 Op->StartLoc = S;
1608 Op->EndLoc = E;
1609 return Op;
1610 }
1611
1612 static std::unique_ptr<AArch64Operand>
1613 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1614 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1615 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1616 Op->VectorList.RegNum = RegNum;
1617 Op->VectorList.Count = Count;
1618 Op->VectorList.NumElements = NumElements;
1619 Op->VectorList.ElementKind = ElementKind;
1620 Op->StartLoc = S;
1621 Op->EndLoc = E;
1622 return Op;
1623 }
1624
1625 static std::unique_ptr<AArch64Operand>
1626 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1627 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1628 Op->VectorIndex.Val = Idx;
1629 Op->StartLoc = S;
1630 Op->EndLoc = E;
1631 return Op;
1632 }
1633
1634 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1635 SMLoc E, MCContext &Ctx) {
1636 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1637 Op->Imm.Val = Val;
1638 Op->StartLoc = S;
1639 Op->EndLoc = E;
1640 return Op;
1641 }
1642
1643 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1644 unsigned ShiftAmount,
1645 SMLoc S, SMLoc E,
1646 MCContext &Ctx) {
1647 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1648 Op->ShiftedImm.Val = Val;
1649 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1650 Op->StartLoc = S;
1651 Op->EndLoc = E;
1652 return Op;
1653 }
1654
1655 static std::unique_ptr<AArch64Operand>
1656 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1657 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1658 Op->CondCode.Code = Code;
1659 Op->StartLoc = S;
1660 Op->EndLoc = E;
1661 return Op;
1662 }
1663
1664 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1665 MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1667 Op->FPImm.Val = Val;
1668 Op->StartLoc = S;
1669 Op->EndLoc = S;
1670 return Op;
1671 }
1672
1673 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1674 StringRef Str,
1675 SMLoc S,
1676 MCContext &Ctx) {
1677 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1678 Op->Barrier.Val = Val;
1679 Op->Barrier.Data = Str.data();
1680 Op->Barrier.Length = Str.size();
1681 Op->StartLoc = S;
1682 Op->EndLoc = S;
1683 return Op;
1684 }
1685
1686 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1687 uint32_t MRSReg,
1688 uint32_t MSRReg,
1689 uint32_t PStateField,
1690 MCContext &Ctx) {
1691 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1692 Op->SysReg.Data = Str.data();
1693 Op->SysReg.Length = Str.size();
1694 Op->SysReg.MRSReg = MRSReg;
1695 Op->SysReg.MSRReg = MSRReg;
1696 Op->SysReg.PStateField = PStateField;
1697 Op->StartLoc = S;
1698 Op->EndLoc = S;
1699 return Op;
1700 }
1701
1702 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1703 SMLoc E, MCContext &Ctx) {
1704 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1705 Op->SysCRImm.Val = Val;
1706 Op->StartLoc = S;
1707 Op->EndLoc = E;
1708 return Op;
1709 }
1710
1711 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1712 StringRef Str,
1713 SMLoc S,
1714 MCContext &Ctx) {
1715 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1716 Op->Prefetch.Val = Val;
1717 Op->Barrier.Data = Str.data();
1718 Op->Barrier.Length = Str.size();
1719 Op->StartLoc = S;
1720 Op->EndLoc = S;
1721 return Op;
1722 }
1723
1724 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1725 StringRef Str,
1726 SMLoc S,
1727 MCContext &Ctx) {
1728 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1729 Op->PSBHint.Val = Val;
1730 Op->PSBHint.Data = Str.data();
1731 Op->PSBHint.Length = Str.size();
1732 Op->StartLoc = S;
1733 Op->EndLoc = S;
1734 return Op;
1735 }
1736
1737 static std::unique_ptr<AArch64Operand>
1738 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1739 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1740 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1741 Op->ShiftExtend.Type = ShOp;
1742 Op->ShiftExtend.Amount = Val;
1743 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1744 Op->StartLoc = S;
1745 Op->EndLoc = E;
1746 return Op;
1747 }
1748};
1749
1750} // end anonymous namespace.
1751
1752void AArch64Operand::print(raw_ostream &OS) const {
1753 switch (Kind) {
1754 case k_FPImm:
1755 OS << "<fpimm " << getFPImm() << "("
1756 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1757 break;
1758 case k_Barrier: {
1759 StringRef Name = getBarrierName();
1760 if (!Name.empty())
1761 OS << "<barrier " << Name << ">";
1762 else
1763 OS << "<barrier invalid #" << getBarrier() << ">";
1764 break;
1765 }
1766 case k_Immediate:
1767 OS << *getImm();
1768 break;
1769 case k_ShiftedImm: {
1770 unsigned Shift = getShiftedImmShift();
1771 OS << "<shiftedimm ";
1772 OS << *getShiftedImmVal();
1773 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1774 break;
1775 }
1776 case k_CondCode:
1777 OS << "<condcode " << getCondCode() << ">";
1778 break;
1779 case k_Register:
1780 OS << "<register " << getReg() << ">";
1781 break;
1782 case k_VectorList: {
1783 OS << "<vectorlist ";
1784 unsigned Reg = getVectorListStart();
1785 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1786 OS << Reg + i << " ";
1787 OS << ">";
1788 break;
1789 }
1790 case k_VectorIndex:
1791 OS << "<vectorindex " << getVectorIndex() << ">";
1792 break;
1793 case k_SysReg:
1794 OS << "<sysreg: " << getSysReg() << '>';
1795 break;
1796 case k_Token:
1797 OS << "'" << getToken() << "'";
1798 break;
1799 case k_SysCR:
1800 OS << "c" << getSysCR();
1801 break;
1802 case k_Prefetch: {
1803 StringRef Name = getPrefetchName();
1804 if (!Name.empty())
1805 OS << "<prfop " << Name << ">";
1806 else
1807 OS << "<prfop invalid #" << getPrefetch() << ">";
1808 break;
1809 }
1810 case k_PSBHint:
1811 OS << getPSBHintName();
1812 break;
1813 case k_ShiftExtend:
1814 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1815 << getShiftExtendAmount();
1816 if (!hasShiftExtendAmount())
1817 OS << "<imp>";
1818 OS << '>';
1819 break;
1820 }
1821}
1822
1823/// @name Auto-generated Match Functions
1824/// {
1825
1826static unsigned MatchRegisterName(StringRef Name);
1827
1828/// }
1829
1830static unsigned MatchNeonVectorRegName(StringRef Name) {
1831 return StringSwitch<unsigned>(Name.lower())
1832 .Case("v0", AArch64::Q0)
1833 .Case("v1", AArch64::Q1)
1834 .Case("v2", AArch64::Q2)
1835 .Case("v3", AArch64::Q3)
1836 .Case("v4", AArch64::Q4)
1837 .Case("v5", AArch64::Q5)
1838 .Case("v6", AArch64::Q6)
1839 .Case("v7", AArch64::Q7)
1840 .Case("v8", AArch64::Q8)
1841 .Case("v9", AArch64::Q9)
1842 .Case("v10", AArch64::Q10)
1843 .Case("v11", AArch64::Q11)
1844 .Case("v12", AArch64::Q12)
1845 .Case("v13", AArch64::Q13)
1846 .Case("v14", AArch64::Q14)
1847 .Case("v15", AArch64::Q15)
1848 .Case("v16", AArch64::Q16)
1849 .Case("v17", AArch64::Q17)
1850 .Case("v18", AArch64::Q18)
1851 .Case("v19", AArch64::Q19)
1852 .Case("v20", AArch64::Q20)
1853 .Case("v21", AArch64::Q21)
1854 .Case("v22", AArch64::Q22)
1855 .Case("v23", AArch64::Q23)
1856 .Case("v24", AArch64::Q24)
1857 .Case("v25", AArch64::Q25)
1858 .Case("v26", AArch64::Q26)
1859 .Case("v27", AArch64::Q27)
1860 .Case("v28", AArch64::Q28)
1861 .Case("v29", AArch64::Q29)
1862 .Case("v30", AArch64::Q30)
1863 .Case("v31", AArch64::Q31)
1864 .Default(0);
1865}
1866
1867static bool isValidVectorKind(StringRef Name) {
1868 return StringSwitch<bool>(Name.lower())
1869 .Case(".8b", true)
1870 .Case(".16b", true)
1871 .Case(".4h", true)
1872 .Case(".8h", true)
1873 .Case(".2s", true)
1874 .Case(".4s", true)
1875 .Case(".1d", true)
1876 .Case(".2d", true)
1877 .Case(".1q", true)
1878 // Accept the width neutral ones, too, for verbose syntax. If those
1879 // aren't used in the right places, the token operand won't match so
1880 // all will work out.
1881 .Case(".b", true)
1882 .Case(".h", true)
1883 .Case(".s", true)
1884 .Case(".d", true)
1885 // Needed for fp16 scalar pairwise reductions
1886 .Case(".2h", true)
1887 // another special case for the ARMv8.2a dot product operand
1888 .Case(".4b", true)
1889 .Default(false);
1890}
1891
1892static unsigned matchSVEDataVectorRegName(StringRef Name) {
1893 return StringSwitch<unsigned>(Name.lower())
1894 .Case("z0", AArch64::Z0)
1895 .Case("z1", AArch64::Z1)
1896 .Case("z2", AArch64::Z2)
1897 .Case("z3", AArch64::Z3)
1898 .Case("z4", AArch64::Z4)
1899 .Case("z5", AArch64::Z5)
1900 .Case("z6", AArch64::Z6)
1901 .Case("z7", AArch64::Z7)
1902 .Case("z8", AArch64::Z8)
1903 .Case("z9", AArch64::Z9)
1904 .Case("z10", AArch64::Z10)
1905 .Case("z11", AArch64::Z11)
1906 .Case("z12", AArch64::Z12)
1907 .Case("z13", AArch64::Z13)
1908 .Case("z14", AArch64::Z14)
1909 .Case("z15", AArch64::Z15)
1910 .Case("z16", AArch64::Z16)
1911 .Case("z17", AArch64::Z17)
1912 .Case("z18", AArch64::Z18)
1913 .Case("z19", AArch64::Z19)
1914 .Case("z20", AArch64::Z20)
1915 .Case("z21", AArch64::Z21)
1916 .Case("z22", AArch64::Z22)
1917 .Case("z23", AArch64::Z23)
1918 .Case("z24", AArch64::Z24)
1919 .Case("z25", AArch64::Z25)
1920 .Case("z26", AArch64::Z26)
1921 .Case("z27", AArch64::Z27)
1922 .Case("z28", AArch64::Z28)
1923 .Case("z29", AArch64::Z29)
1924 .Case("z30", AArch64::Z30)
1925 .Case("z31", AArch64::Z31)
1926 .Default(0);
1927}
1928
1929static bool isValidSVEKind(StringRef Name) {
1930 return StringSwitch<bool>(Name.lower())
1931 .Case(".b", true)
1932 .Case(".h", true)
1933 .Case(".s", true)
1934 .Case(".d", true)
1935 .Case(".q", true)
1936 .Default(false);
1937}
1938
1939static bool isSVEDataVectorRegister(StringRef Name) {
1940 return Name[0] == 'z';
1941}
1942
1943static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1944 char &ElementKind) {
1945 assert(isValidVectorKind(Name));
1946
1947 ElementKind = Name.lower()[Name.size() - 1];
1948 NumElements = 0;
1949
1950 if (Name.size() == 2)
1951 return;
1952
1953 // Parse the lane count
1954 Name = Name.drop_front();
1955 while (isdigit(Name.front())) {
1956 NumElements = 10 * NumElements + (Name.front() - '0');
1957 Name = Name.drop_front();
1958 }
1959}
1960
1961bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1962 SMLoc &EndLoc) {
1963 StartLoc = getLoc();
1964 RegNo = tryParseRegister();
1965 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1966 return (RegNo == (unsigned)-1);
1967}
1968
1969// Matches a register name or register alias previously defined by '.req'
1970unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1971 RegKind Kind) {
1972 unsigned RegNum;
1973 switch (Kind) {
1974 case RegKind::Scalar:
1975 RegNum = MatchRegisterName(Name);
1976 break;
1977 case RegKind::NeonVector:
1978 RegNum = MatchNeonVectorRegName(Name);
1979 break;
1980 case RegKind::SVEDataVector:
1981 RegNum = matchSVEDataVectorRegName(Name);
1982 break;
1983 }
1984
1985 if (!RegNum) {
1986 // Check for aliases registered via .req. Canonicalize to lower case.
1987 // That's more consistent since register names are case insensitive, and
1988 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1989 auto Entry = RegisterReqs.find(Name.lower());
1990 if (Entry == RegisterReqs.end())
1991 return 0;
1992
1993 // set RegNum if the match is the right kind of register
1994 if (Kind == Entry->getValue().first)
1995 RegNum = Entry->getValue().second;
1996 }
1997 return RegNum;
1998}
1999
2000 /// tryParseRegister - Try to parse a register name. The token must be an
2001 /// Identifier when called; if it is a register name, the token is eaten and
2002 /// the register number is returned (or -1 if no register matches).
2003int AArch64AsmParser::tryParseRegister() {
2004 MCAsmParser &Parser = getParser();
2005 const AsmToken &Tok = Parser.getTok();
2006 if (Tok.isNot(AsmToken::Identifier))
2007 return -1;
2008
2009 std::string lowerCase = Tok.getString().lower();
2010 if (isSVEDataVectorRegister(lowerCase))
2011 return -1;
2012
2013 unsigned RegNum = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2014 // Also handle a few aliases of registers.
2015 if (RegNum == 0)
2016 RegNum = StringSwitch<unsigned>(lowerCase)
2017 .Case("fp", AArch64::FP)
2018 .Case("lr", AArch64::LR)
2019 .Case("x31", AArch64::XZR)
2020 .Case("w31", AArch64::WZR)
2021 .Default(0);
2022
2023 if (RegNum == 0)
2024 return -1;
2025
2026 Parser.Lex(); // Eat identifier token.
2027 return RegNum;
2028}
2029
2030/// tryMatchVectorRegister - Try to parse a vector register name with optional
2031/// kind specifier. If it is a register specifier, eat the token and return it.
2032int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
2033 MCAsmParser &Parser = getParser();
2034 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2035 TokError("vector register expected");
2036 return -1;
2037 }
2038
2039 StringRef Name = Parser.getTok().getString();
2040 // If there is a kind specifier, it's separated from the register name by
2041 // a '.'.
2042 size_t Start = 0, Next = Name.find('.');
2043 StringRef Head = Name.slice(Start, Next);
2044 unsigned RegNum = matchRegisterNameAlias(Head, RegKind::NeonVector);
2045
2046 if (RegNum) {
2047 if (Next != StringRef::npos) {
2048 Kind = Name.slice(Next, StringRef::npos);
2049 if (!isValidVectorKind(Kind)) {
2050 TokError("invalid vector kind qualifier");
2051 return -1;
2052 }
2053 }
2054 Parser.Lex(); // Eat the register token.
2055 return RegNum;
2056 }
2057
2058 if (expected)
2059 TokError("vector register expected");
2060 return -1;
2061}
2062
2063/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2064OperandMatchResultTy
2065AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2066 MCAsmParser &Parser = getParser();
2067 SMLoc S = getLoc();
2068
2069 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2070 Error(S, "Expected cN operand where 0 <= N <= 15");
2071 return MatchOperand_ParseFail;
2072 }
2073
2074 StringRef Tok = Parser.getTok().getIdentifier();
2075 if (Tok[0] != 'c' && Tok[0] != 'C') {
2076 Error(S, "Expected cN operand where 0 <= N <= 15");
2077 return MatchOperand_ParseFail;
2078 }
2079
2080 uint32_t CRNum;
2081 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2082 if (BadNum || CRNum > 15) {
2083 Error(S, "Expected cN operand where 0 <= N <= 15");
2084 return MatchOperand_ParseFail;
2085 }
2086
2087 Parser.Lex(); // Eat identifier token.
2088 Operands.push_back(
2089 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2090 return MatchOperand_Success;
2091}
2092
2093/// tryParsePrefetch - Try to parse a prefetch operand.
2094OperandMatchResultTy
2095AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2096 MCAsmParser &Parser = getParser();
2097 SMLoc S = getLoc();
2098 const AsmToken &Tok = Parser.getTok();
2099 // Either an identifier for named values or a 5-bit immediate.
2100 // Eat optional hash.
2101 if (parseOptionalToken(AsmToken::Hash) ||
2102 Tok.is(AsmToken::Integer)) {
2103 const MCExpr *ImmVal;
2104 if (getParser().parseExpression(ImmVal))
2105 return MatchOperand_ParseFail;
2106
2107 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2108 if (!MCE) {
2109 TokError("immediate value expected for prefetch operand");
2110 return MatchOperand_ParseFail;
2111 }
2112 unsigned prfop = MCE->getValue();
2113 if (prfop > 31) {
2114 TokError("prefetch operand out of range, [0,31] expected");
2115 return MatchOperand_ParseFail;
2116 }
2117
2118 auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
2119 Operands.push_back(AArch64Operand::CreatePrefetch(
2120 prfop, PRFM ? PRFM->Name : "", S, getContext()));
2121 return MatchOperand_Success;
2122 }
2123
2124 if (Tok.isNot(AsmToken::Identifier)) {
2125 TokError("pre-fetch hint expected");
2126 return MatchOperand_ParseFail;
2127 }
2128
2129 auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
2130 if (!PRFM) {
2131 TokError("pre-fetch hint expected");
2132 return MatchOperand_ParseFail;
2133 }
2134
2135 Parser.Lex(); // Eat identifier token.
2136 Operands.push_back(AArch64Operand::CreatePrefetch(
2137 PRFM->Encoding, Tok.getString(), S, getContext()));
2138 return MatchOperand_Success;
2139}
2140
2141/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2142OperandMatchResultTy
2143AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2144 MCAsmParser &Parser = getParser();
2145 SMLoc S = getLoc();
2146 const AsmToken &Tok = Parser.getTok();
2147 if (Tok.isNot(AsmToken::Identifier)) {
2148 TokError("invalid operand for instruction");
2149 return MatchOperand_ParseFail;
2150 }
2151
2152 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2153 if (!PSB) {
2154 TokError("invalid operand for instruction");
2155 return MatchOperand_ParseFail;
2156 }
2157
2158 Parser.Lex(); // Eat identifier token.
2159 Operands.push_back(AArch64Operand::CreatePSBHint(
2160 PSB->Encoding, Tok.getString(), S, getContext()));
2161 return MatchOperand_Success;
2162}
2163
2164/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2165/// instruction.
2166OperandMatchResultTy
2167AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2168 MCAsmParser &Parser = getParser();
2169 SMLoc S = getLoc();
2170 const MCExpr *Expr;
2171
2172 if (Parser.getTok().is(AsmToken::Hash)) {
2173 Parser.Lex(); // Eat hash token.
2174 }
2175
2176 if (parseSymbolicImmVal(Expr))
2177 return MatchOperand_ParseFail;
2178
2179 AArch64MCExpr::VariantKind ELFRefKind;
2180 MCSymbolRefExpr::VariantKind DarwinRefKind;
2181 int64_t Addend;
2182 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2183 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2184 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2185 // No modifier was specified at all; this is the syntax for an ELF basic
2186 // ADRP relocation (unfortunately).
2187 Expr =
2188 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2189 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2190 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2191 Addend != 0) {
2192 Error(S, "gotpage label reference not allowed an addend");
2193 return MatchOperand_ParseFail;
2194 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2195 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2196 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2197 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2198 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2199 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2200 // The operand must be an @page or @gotpage qualified symbolref.
2201 Error(S, "page or gotpage label reference expected");
2202 return MatchOperand_ParseFail;
2203 }
2204 }
2205
2206 // We have either a label reference possibly with addend or an immediate. The
2207 // addend is a raw value here. The linker will adjust it to only reference the
2208 // page.
2209 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2210 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2211
2212 return MatchOperand_Success;
2213}
2214
2215/// tryParseAdrLabel - Parse and validate a source label for the ADR
2216/// instruction.
2217OperandMatchResultTy
2218AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2219 SMLoc S = getLoc();
2220 const MCExpr *Expr;
2221
2222 parseOptionalToken(AsmToken::Hash);
2223 if (getParser().parseExpression(Expr))
2224 return MatchOperand_ParseFail;
2225
2226 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2227 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2228
2229 return MatchOperand_Success;
2230}
2231
2232/// tryParseFPImm - A floating point immediate expression operand.
2233OperandMatchResultTy
2234AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2235 MCAsmParser &Parser = getParser();
2236 SMLoc S = getLoc();
2237
2238 bool Hash = parseOptionalToken(AsmToken::Hash);
2239
2240 // Handle negation, as that still comes through as a separate token.
2241 bool isNegative = parseOptionalToken(AsmToken::Minus);
2242
2243 const AsmToken &Tok = Parser.getTok();
2244 if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) {
2245 int64_t Val;
2246 if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) {
2247 Val = Tok.getIntVal();
2248 if (Val > 255 || Val < 0) {
2249 TokError("encoded floating point value out of range");
2250 return MatchOperand_ParseFail;
2251 }
2252 } else {
2253 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2254 if (isNegative)
2255 RealVal.changeSign();
2256
2257 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2258 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2259
2260 // Check for out of range values. As an exception we let Zero through,
2261 // but as tokens instead of an FPImm so that it can be matched by the
2262 // appropriate alias if one exists.
2263 if (RealVal.isPosZero()) {
2264 Parser.Lex(); // Eat the token.
2265 Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext()));
2266 Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext()));
2267 return MatchOperand_Success;
2268 } else if (Val == -1) {
2269 TokError("expected compatible register or floating-point constant");
2270 return MatchOperand_ParseFail;
2271 }
2272 }
2273 Parser.Lex(); // Eat the token.
2274 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2275 return MatchOperand_Success;
2276 }
2277
2278 if (!Hash)
2279 return MatchOperand_NoMatch;
2280
2281 TokError("invalid floating point immediate");
2282 return MatchOperand_ParseFail;
2283}
2284
2285/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2286OperandMatchResultTy
2287AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2288 MCAsmParser &Parser = getParser();
2289 SMLoc S = getLoc();
2290
2291 if (Parser.getTok().is(AsmToken::Hash))
2292 Parser.Lex(); // Eat '#'
2293 else if (Parser.getTok().isNot(AsmToken::Integer))
2294 // The operand should start with '#' or be an integer; otherwise it is not a match.
2295 return MatchOperand_NoMatch;
2296
2297 const MCExpr *Imm;
2298 if (parseSymbolicImmVal(Imm))
2299 return MatchOperand_ParseFail;
2300 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2301 uint64_t ShiftAmount = 0;
2302 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2303 if (MCE) {
2304 int64_t Val = MCE->getValue();
2305 if (Val > 0xfff && (Val & 0xfff) == 0) {
2306 Imm = MCConstantExpr::create(Val >> 12, getContext());
2307 ShiftAmount = 12;
2308 }
2309 }
2310 SMLoc E = Parser.getTok().getLoc();
2311 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2312 getContext()));
2313 return MatchOperand_Success;
2314 }
2315
2316 // Eat ','
2317 Parser.Lex();
2318
2319 // The optional operand must be "lsl #N" where N is non-negative.
2320 if (!Parser.getTok().is(AsmToken::Identifier) ||
2321 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2322 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2323 return MatchOperand_ParseFail;
2324 }
2325
2326 // Eat 'lsl'
2327 Parser.Lex();
2328
2329 parseOptionalToken(AsmToken::Hash);
2330
2331 if (Parser.getTok().isNot(AsmToken::Integer)) {
2332 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2333 return MatchOperand_ParseFail;
2334 }
2335
2336 int64_t ShiftAmount = Parser.getTok().getIntVal();
2337
2338 if (ShiftAmount < 0) {
2339 Error(Parser.getTok().getLoc(), "positive shift amount required");
2340 return MatchOperand_ParseFail;
2341 }
2342 Parser.Lex(); // Eat the number
2343
2344 SMLoc E = Parser.getTok().getLoc();
2345 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2346 S, E, getContext()));
2347 return MatchOperand_Success;
2348}
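// Illustrative note (an added worked example, not in the original source):
// when no ", lsl #N" follows the immediate, a constant larger than 0xfff
// whose low 12 bits are clear is folded here, so a hypothetical "#0x5000"
// is stored as the shifted immediate 5 with ShiftAmount = 12, matching the
// optional LSL #12 of the ADD/SUB (immediate) encoding.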
2349
2350/// parseCondCodeString - Parse a Condition Code string.
2351AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2352 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2353 .Case("eq", AArch64CC::EQ)
2354 .Case("ne", AArch64CC::NE)
2355 .Case("cs", AArch64CC::HS)
2356 .Case("hs", AArch64CC::HS)
2357 .Case("cc", AArch64CC::LO)
2358 .Case("lo", AArch64CC::LO)
2359 .Case("mi", AArch64CC::MI)
2360 .Case("pl", AArch64CC::PL)
2361 .Case("vs", AArch64CC::VS)
2362 .Case("vc", AArch64CC::VC)
2363 .Case("hi", AArch64CC::HI)
2364 .Case("ls", AArch64CC::LS)
2365 .Case("ge", AArch64CC::GE)
2366 .Case("lt", AArch64CC::LT)
2367 .Case("gt", AArch64CC::GT)
2368 .Case("le", AArch64CC::LE)
2369 .Case("al", AArch64CC::AL)
2370 .Case("nv", AArch64CC::NV)
2371 .Default(AArch64CC::Invalid);
2372 return CC;
2373}
2374
2375/// parseCondCode - Parse a Condition Code operand.
2376bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2377 bool invertCondCode) {
2378 MCAsmParser &Parser = getParser();
2379 SMLoc S = getLoc();
2380 const AsmToken &Tok = Parser.getTok();
2381 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2382
2383 StringRef Cond = Tok.getString();
2384 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2385 if (CC == AArch64CC::Invalid)
2386 return TokError("invalid condition code");
2387 Parser.Lex(); // Eat identifier token.
2388
2389 if (invertCondCode) {
2390 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2391 return TokError("condition codes AL and NV are invalid for this instruction");
2392 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2393 }
2394
2395 Operands.push_back(
2396 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2397 return false;
2398}
2399
2400 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
2401 /// extend argument. Parse it if present.
2402OperandMatchResultTy
2403AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2404 MCAsmParser &Parser = getParser();
2405 const AsmToken &Tok = Parser.getTok();
2406 std::string LowerID = Tok.getString().lower();
2407 AArch64_AM::ShiftExtendType ShOp =
2408 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2409 .Case("lsl", AArch64_AM::LSL)
2410 .Case("lsr", AArch64_AM::LSR)
2411 .Case("asr", AArch64_AM::ASR)
2412 .Case("ror", AArch64_AM::ROR)
2413 .Case("msl", AArch64_AM::MSL)
2414 .Case("uxtb", AArch64_AM::UXTB)
2415 .Case("uxth", AArch64_AM::UXTH)
2416 .Case("uxtw", AArch64_AM::UXTW)
2417 .Case("uxtx", AArch64_AM::UXTX)
2418 .Case("sxtb", AArch64_AM::SXTB)
2419 .Case("sxth", AArch64_AM::SXTH)
2420 .Case("sxtw", AArch64_AM::SXTW)
2421 .Case("sxtx", AArch64_AM::SXTX)
2422 .Default(AArch64_AM::InvalidShiftExtend);
2423
2424 if (ShOp == AArch64_AM::InvalidShiftExtend)
2425 return MatchOperand_NoMatch;
2426
2427 SMLoc S = Tok.getLoc();
2428 Parser.Lex();
2429
2430 bool Hash = parseOptionalToken(AsmToken::Hash);
2431
2432 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2433 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2434 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2435 ShOp == AArch64_AM::MSL) {
2436 // We expect a number here.
2437 TokError("expected #imm after shift specifier");
2438 return MatchOperand_ParseFail;
2439 }
2440
2441 // "extend" type operations don't need an immediate, #0 is implicit.
2442 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2443 Operands.push_back(
2444 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2445 return MatchOperand_Success;
2446 }
2447
2448 // Make sure we do actually have a number, identifier or a parenthesized
2449 // expression.
2450 SMLoc E = Parser.getTok().getLoc();
2451 if (!Parser.getTok().is(AsmToken::Integer) &&
2452 !Parser.getTok().is(AsmToken::LParen) &&
2453 !Parser.getTok().is(AsmToken::Identifier)) {
2454 Error(E, "expected integer shift amount");
2455 return MatchOperand_ParseFail;
2456 }
2457
2458 const MCExpr *ImmVal;
2459 if (getParser().parseExpression(ImmVal))
2460 return MatchOperand_ParseFail;
2461
2462 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2463 if (!MCE) {
2464 Error(E, "expected constant '#imm' after shift specifier");
2465 return MatchOperand_ParseFail;
2466 }
2467
2468 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2469 Operands.push_back(AArch64Operand::CreateShiftExtend(
2470 ShOp, MCE->getValue(), true, S, E, getContext()));
2471 return MatchOperand_Success;
2472}
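// Illustrative note (an added worked example, not in the original source):
// a bare extend such as "uxtw" is accepted with an implicit amount of 0
// (HasExplicitAmount = false), whereas a bare shift such as "lsl" with no
// following immediate is rejected above with "expected #imm after shift
// specifier".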
2473
2474static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2475 if (FBS[AArch64::HasV8_1aOps])
2476 Str += "ARMv8.1a";
2477 else if (FBS[AArch64::HasV8_2aOps])
2478 Str += "ARMv8.2a";
2479 else
2480 Str += "(unknown)";
2481}
2482
2483void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2484 SMLoc S) {
2485 const uint16_t Op2 = Encoding & 7;
2486 const uint16_t Cm = (Encoding & 0x78) >> 3;
2487 const uint16_t Cn = (Encoding & 0x780) >> 7;
2488 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2489
2490 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2491
2492 Operands.push_back(
2493 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2494 Operands.push_back(
2495 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2496 Operands.push_back(
2497 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2498 Expr = MCConstantExpr::create(Op2, getContext());
2499 Operands.push_back(
2500 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2501}
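// Illustrative sketch (added, not in the original source): the SYS alias
// encoding handled above packs op1:Cn:Cm:op2 into the low 14 bits of one
// value. A minimal standalone check of that bit layout, using only the
// <cstdint>/<cassert> headers already included by this file; the helper
// name and the sample encoding are assumptions for illustration, not LLVM
// API:
static void checkSysAliasFieldLayout() {
  const uint16_t Encoding = 0x1F15;               // hypothetical packed value
  const uint16_t Op2 = Encoding & 7;              // bits [2:0]
  const uint16_t Cm = (Encoding & 0x78) >> 3;     // bits [6:3]
  const uint16_t Cn = (Encoding & 0x780) >> 7;    // bits [10:7]
  const uint16_t Op1 = (Encoding & 0x3800) >> 11; // bits [13:11]
  assert(Op1 == 3 && Cn == 14 && Cm == 2 && Op2 == 5 && "unexpected split");
  (void)Op1; (void)Cn; (void)Cm; (void)Op2;       // keep -Wunused quiet under NDEBUG
}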
2502
2503/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2504/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2505bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2506 OperandVector &Operands) {
2507 if (Name.find('.') != StringRef::npos)
2508 return TokError("invalid operand");
2509
2510 Mnemonic = Name;
2511 Operands.push_back(
2512 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2513
2514 MCAsmParser &Parser = getParser();
2515 const AsmToken &Tok = Parser.getTok();
2516 StringRef Op = Tok.getString();
2517 SMLoc S = Tok.getLoc();
2518
2519 if (Mnemonic == "ic") {
2520 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2521 if (!IC)
2522 return TokError("invalid operand for IC instruction");
2523 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2524 std::string Str("IC " + std::string(IC->Name) + " requires ");
2525 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2526 return TokError(Str.c_str());
2527 }
2528 createSysAlias(IC->Encoding, Operands, S);
2529 } else if (Mnemonic == "dc") {
2530 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2531 if (!DC)
2532 return TokError("invalid operand for DC instruction");
2533 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2534 std::string Str("DC " + std::string(DC->Name) + " requires ");
2535 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2536 return TokError(Str.c_str());
2537 }
2538 createSysAlias(DC->Encoding, Operands, S);
2539 } else if (Mnemonic == "at") {
2540 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2541 if (!AT)
2542 return TokError("invalid operand for AT instruction");
2543 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2544 std::string Str("AT " + std::string(AT->Name) + " requires ");
2545 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2546 return TokError(Str.c_str());
2547 }
2548 createSysAlias(AT->Encoding, Operands, S);
2549 } else if (Mnemonic == "tlbi") {
2550 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2551 if (!TLBI)
2552 return TokError("invalid operand for TLBI instruction");
2553 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2554 std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2555 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2556 return TokError(Str.c_str());
2557 }
2558 createSysAlias(TLBI->Encoding, Operands, S);
2559 }
2560
2561 Parser.Lex(); // Eat operand.
2562
2563 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2564 bool HasRegister = false;
2565
2566 // Check for the optional register operand.
2567 if (parseOptionalToken(AsmToken::Comma)) {
2568 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2569 return TokError("expected register operand");
2570 HasRegister = true;
2571 }
2572
2573 if (ExpectRegister && !HasRegister)
2574 return TokError("specified " + Mnemonic + " op requires a register");
2575 else if (!ExpectRegister && HasRegister)
2576 return TokError("specified " + Mnemonic + " op does not use a register");
2577
2578 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2579 return true;
2580
2581 return false;
2582}
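
As a usage illustration of the parsing above (the numeric fields come from the generated IC/DC/AT/TLBI lookup tables; the values quoted here are the architecturally documented ones and are shown only for orientation), the aliases are rewritten onto a "sys" mnemonic, and the trailing register is required exactly when the operation name does not contain "all":

  ic   ialluis        // contains "all": no register allowed; roughly sys #0, c7, c1, #0
  ic   ivau, x0       // register required; roughly sys #3, c7, c5, #1, x0
  tlbi vmalle1        // contains "all": a trailing register would be rejected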
2583
2584OperandMatchResultTy
2585AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2586 MCAsmParser &Parser = getParser();
2587 const AsmToken &Tok = Parser.getTok();
2588
2589 // Can be either a #imm style literal or an option name
2590 if (parseOptionalToken(AsmToken::Hash) ||
2591 Tok.is(AsmToken::Integer)) {
2592 // Immediate operand.
2593 const MCExpr *ImmVal;
2594 SMLoc ExprLoc = getLoc();
2595 if (getParser().parseExpression(ImmVal))
2596 return MatchOperand_ParseFail;
2597 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2598 if (!MCE) {
2599 Error(ExprLoc, "immediate value expected for barrier operand");
2600 return MatchOperand_ParseFail;
2601 }
2602 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2603 Error(ExprLoc, "barrier operand out of range");
2604 return MatchOperand_ParseFail;
2605 }
2606 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2607 Operands.push_back(AArch64Operand::CreateBarrier(
2608 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2609 return MatchOperand_Success;
2610 }
2611
2612 if (Tok.isNot(AsmToken::Identifier)) {
2613 TokError("invalid operand for instruction");
2614 return MatchOperand_ParseFail;
2615 }
2616
2617 // The only valid named option for ISB is 'sy'
2618 auto DB = AArch64DB::lookupDBByName(Tok.getString());
2619 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2620 TokError("'sy' or #imm operand expected");
2621 return MatchOperand_ParseFail;
2622 } else if (!DB) {
2623 TokError("invalid barrier option name");
2624 return MatchOperand_ParseFail;
2625 }
2626
2627 Operands.push_back(AArch64Operand::CreateBarrier(
2628 DB->Encoding, Tok.getString(), getLoc(), getContext()));
2629 Parser.Lex(); // Consume the option
2630
2631 return MatchOperand_Success;
2632}
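
For illustration (assuming the usual AArch64 barrier option encodings, e.g. ish = 11), both spellings below produce the same barrier operand, while ISB only accepts the named option 'sy' or an immediate:

  dmb ish     // named option, resolved via lookupDBByName
  dmb #11     // immediate in [0, 15], resolved via lookupDBByEncoding
  isb sy      // the only named option accepted for isb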
2633
2634OperandMatchResultTy
2635AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2636 MCAsmParser &Parser = getParser();
2637 const AsmToken &Tok = Parser.getTok();
2638
2639 if (Tok.isNot(AsmToken::Identifier))
2640 return MatchOperand_NoMatch;
2641
2642 int MRSReg, MSRReg;
2643 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2644 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2645 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2646 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2647 } else
2648 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2649
2650 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2651 unsigned PStateImm = -1;
2652 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2653 PStateImm = PState->Encoding;
2654
2655 Operands.push_back(
2656 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2657 PStateImm, getContext()));
2658 Parser.Lex(); // Eat identifier
2659
2660 return MatchOperand_Success;
2661}
2662
2663/// tryParseNeonVectorRegister - Parse a vector register operand.
2664bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2665 MCAsmParser &Parser = getParser();
2666 if (Parser.getTok().isNot(AsmToken::Identifier))
2667 return true;
2668
2669 SMLoc S = getLoc();
2670 // Check for a vector register specifier first.
2671 StringRef Kind;
2672 int64_t Reg = tryMatchVectorRegister(Kind, false);
2673 if (Reg == -1)
2674 return true;
2675 Operands.push_back(
2676 AArch64Operand::CreateReg(Reg, RegKind::NeonVector, S, getLoc(),
2677 getContext()));
2678
2679 // If there was an explicit qualifier, that goes on as a literal text
2680 // operand.
2681 if (!Kind.empty())
2682 Operands.push_back(
2683 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2684
2685 // If there is an index specifier following the register, parse that too.
2686 SMLoc SIdx = getLoc();
2687 if (parseOptionalToken(AsmToken::LBrac)) {
2688 const MCExpr *ImmVal;
2689 if (getParser().parseExpression(ImmVal))
2690 return false;
2691 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2692 if (!MCE) {
2693 TokError("immediate value expected for vector index");
2694 return false;
2695 }
2696
2697 SMLoc E = getLoc();
2698
2699 if (parseToken(AsmToken::RBrac, "']' expected"))
2700 return false;
2701
2702 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2703 E, getContext()));
2704 }
2705
2706 return false;
2707}
2708
2709 // tryParseSVERegister - Try to parse an SVE vector register name with an
2710 // optional kind specifier. If it is a register specifier, eat the token
2711 // and return it.
2712OperandMatchResultTy
2713AArch64AsmParser::tryParseSVERegister(int &Reg, StringRef &Kind,
2714 RegKind MatchKind) {
2715 MCAsmParser &Parser = getParser();
2716 const AsmToken &Tok = Parser.getTok();
2717
2718 if (Tok.isNot(AsmToken::Identifier))
2719 return MatchOperand_NoMatch;
2720
2721 StringRef Name = Tok.getString();
2722 // If there is a kind specifier, it's separated from the register name by
2723 // a '.'.
2724 size_t Start = 0, Next = Name.find('.');
2725 StringRef Head = Name.slice(Start, Next);
2726 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
2727
2728 if (RegNum) {
2729 if (Next != StringRef::npos) {
2730 Kind = Name.slice(Next, StringRef::npos);
2731 if (!isValidSVEKind(Kind)) {
2732 TokError("invalid sve vector kind qualifier");
2733 return MatchOperand_ParseFail;
2734 }
2735 }
2736 Parser.Lex(); // Eat the register token.
2737
2738 Reg = RegNum;
2739 return MatchOperand_Success;
2740 }
2741
2742 return MatchOperand_NoMatch;
2743}
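
A standalone sketch of the name/kind split performed above, using std::string in place of StringRef (the token value is illustrative):

#include <cstdio>
#include <string>

int main() {
  const std::string Name = "z0.b";                // hypothetical SVE operand token
  const size_t Next = Name.find('.');
  const std::string Head = Name.substr(0, Next);  // "z0" -> matched as a register name
  const std::string Kind =                        // ".b" -> checked by isValidSVEKind
      Next == std::string::npos ? std::string() : Name.substr(Next);
  std::printf("Head=%s Kind=%s\n", Head.c_str(), Kind.c_str());
  return 0;
}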
2744
2745/// parseRegister - Parse a non-vector register operand.
2746bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2747 SMLoc S = getLoc();
2748 // Try for a vector (neon) register.
2749 if (!tryParseNeonVectorRegister(Operands))
2750 return false;
2751
2752 // Try for a scalar register.
2753 int64_t Reg = tryParseRegister();
2754 if (Reg == -1)
2755 return true;
2756 Operands.push_back(AArch64Operand::CreateReg(Reg, RegKind::Scalar, S,
2757 getLoc(), getContext()));
2758
2759 return false;
2760}
2761
2762bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2763 MCAsmParser &Parser = getParser();
31
Calling 'MCAsmParserExtension::getParser'
32
Returning from 'MCAsmParserExtension::getParser'
59
Calling 'MCAsmParserExtension::getParser'
60
Returning from 'MCAsmParserExtension::getParser'
89
Calling 'MCAsmParserExtension::getParser'
90
Returning from 'MCAsmParserExtension::getParser'
119
Calling 'MCAsmParserExtension::getParser'
120
Returning from 'MCAsmParserExtension::getParser'
2764 bool HasELFModifier = false;
2765 AArch64MCExpr::VariantKind RefKind;
2766
2767 if (parseOptionalToken(AsmToken::Colon)) {
33
Calling 'MCAsmParserExtension::parseOptionalToken'
36
Returning from 'MCAsmParserExtension::parseOptionalToken'
37
Assuming the condition is false
38
Taking false branch
61
Calling 'MCAsmParserExtension::parseOptionalToken'
64
Returning from 'MCAsmParserExtension::parseOptionalToken'
65
Assuming the condition is false
66
Taking false branch
91
Calling 'MCAsmParserExtension::parseOptionalToken'
94
Returning from 'MCAsmParserExtension::parseOptionalToken'
95
Assuming the condition is false
96
Taking false branch
121
Calling 'MCAsmParserExtension::parseOptionalToken'
124
Returning from 'MCAsmParserExtension::parseOptionalToken'
125
Assuming the condition is true
126
Taking true branch
2768 HasELFModifier = true;
2769
2770 if (Parser.getTok().isNot(AsmToken::Identifier))
127
Calling 'AsmToken::isNot'
129
Returning from 'AsmToken::isNot'
130
Taking true branch
2771 return TokError("expect relocation specifier in operand after ':'");
131
Calling constructor for 'Twine'
138
Returning from constructor for 'Twine'
139
Calling 'MCAsmParserExtension::TokError'
142
Returning from 'MCAsmParserExtension::TokError'
2772
2773 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2774 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2775 .Case("lo12", AArch64MCExpr::VK_LO12)
2776 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2777 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2778 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2779 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2780 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2781 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2782 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2783 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2784 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2785 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2786 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2787 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2788 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2789 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2790 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2791 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2792 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2793 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2794 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2795 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2796 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2797 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2798 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2799 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2800 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2801 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2802 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2803 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2804 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2805 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2806 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2807 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2808 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2809 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2810 .Default(AArch64MCExpr::VK_INVALID);
2811
2812 if (RefKind == AArch64MCExpr::VK_INVALID)
2813 return TokError("expect relocation specifier in operand after ':'");
2814
2815 Parser.Lex(); // Eat identifier
2816
2817 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
2818 return true;
2819 }
2820
2821 if (getParser().parseExpression(ImmVal))
39
Calling 'MCAsmParserExtension::getParser'
40
Returning from 'MCAsmParserExtension::getParser'
41
Assuming the condition is false
42
Taking false branch
67
Calling 'MCAsmParserExtension::getParser'
68
Returning from 'MCAsmParserExtension::getParser'
69
Assuming the condition is false
70
Taking false branch
97
Calling 'MCAsmParserExtension::getParser'
98
Returning from 'MCAsmParserExtension::getParser'
99
Assuming the condition is false
100
Taking false branch
2822 return true;
2823
2824 if (HasELFModifier)
43
Taking false branch
71
Taking false branch
101
Taking false branch
2825 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2826
2827 return false;
2828}
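
This is the function the analyzer steps into on the reported path: ImmVal is written only by parseExpression (and optionally re-wrapped by AArch64MCExpr::create), while the early 'return TokError(...)' at line 2771 writes nothing. TokError is expected to always return true, so in practice the caller's check at line 2982 bails out, but the analyzer cannot prove that, which is how it reaches the uninitialized read at line 2986; the report is therefore most likely a false positive. A self-contained toy reproducing the flagged pattern (names are illustrative, not LLVM APIs):

#include <cstdio>

static bool reportError(const char *Msg) {
  std::fprintf(stderr, "error: %s\n", Msg);
  return true;  // mirrors TokError/Error, which always report failure
}

// The out-parameter is written only on the success path. If reportError could
// ever return false, the caller would go on to read an indeterminate pointer,
// which is exactly what the analyzer assumes at line 2986.
static bool parseValue(const int *&Out, bool BadToken) {
  if (BadToken)
    return reportError("expect relocation specifier in operand after ':'");
  static const int Value = 42;
  Out = &Value;
  return false;  // false == success, matching the parser's convention
}

int main() {
  const int *Expr;             // declared without an initial value, like 'Expr' at line 2981
  if (parseValue(Expr, /*BadToken=*/true))
    return 1;                  // taken in practice, because reportError returns true
  std::printf("%d\n", *Expr);  // reachable only if the error path returned false
  return 0;
}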
2829
2830/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2831bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2832 MCAsmParser &Parser = getParser();
2833 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2834 SMLoc S = getLoc();
2835 Parser.Lex(); // Eat left bracket token.
2836 StringRef Kind;
2837 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2838 if (FirstReg == -1)
2839 return true;
2840 int64_t PrevReg = FirstReg;
2841 unsigned Count = 1;
2842
2843 if (parseOptionalToken(AsmToken::Minus)) {
2844 SMLoc Loc = getLoc();
2845 StringRef NextKind;
2846 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2847 if (Reg == -1)
2848 return true;
2849 // Any Kind suffixes must match on all regs in the list.
2850 if (Kind != NextKind)
2851 return Error(Loc, "mismatched register size suffix");
2852
2853 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2854
2855 if (Space == 0 || Space > 3) {
2856 return Error(Loc, "invalid number of vectors");
2857 }
2858
2859 Count += Space;
2860 }
2861 else {
2862 while (parseOptionalToken(AsmToken::Comma)) {
2863 SMLoc Loc = getLoc();
2864 StringRef NextKind;
2865 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2866 if (Reg == -1)
2867 return true;
2868 // Any Kind suffixes must match on all regs in the list.
2869 if (Kind != NextKind)
2870 return Error(Loc, "mismatched register size suffix");
2871
2872 // Registers must be incremental (with wraparound at 31)
2873 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2874 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2875 return Error(Loc, "registers must be sequential");
2876
2877 PrevReg = Reg;
2878 ++Count;
2879 }
2880 }
2881
2882 if (parseToken(AsmToken::RCurly, "'}' expected"))
2883 return true;
2884
2885 if (Count > 4)
2886 return Error(S, "invalid number of vectors");
2887
2888 unsigned NumElements = 0;
2889 char ElementKind = 0;
2890 if (!Kind.empty())
2891 parseValidVectorKind(Kind, NumElements, ElementKind);
2892
2893 Operands.push_back(AArch64Operand::CreateVectorList(
2894 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2895
2896 // If there is an index specifier following the list, parse that too.
2897 SMLoc SIdx = getLoc();
2898 if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token.
2899 const MCExpr *ImmVal;
2900 if (getParser().parseExpression(ImmVal))
2901 return false;
2902 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2903 if (!MCE) {
2904 TokError("immediate value expected for vector index");
2905 return false;
2906 }
2907
2908 SMLoc E = getLoc();
2909 if (parseToken(AsmToken::RBrac, "']' expected"))
2910 return false;
2911
2912 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2913 E, getContext()));
2914 }
2915 return false;
2916}
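
A standalone sketch of the range form's length computation above; plain vector numbers stand in for register ids, and the wrap past v31 is the interesting case (values illustrative):

#include <cstdio>

int main() {
  const unsigned PrevReg = 30, Reg = 1;  // e.g. "{ v30.8b - v1.8b }"
  const unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
  const unsigned Count = 1 + Space;      // three registers past the first -> 4 total
  std::printf("Space=%u Count=%u\n", Space, Count);  // Space=3 Count=4
  return 0;
}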
2917
2918OperandMatchResultTy
2919AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2920 MCAsmParser &Parser = getParser();
2921 const AsmToken &Tok = Parser.getTok();
2922 if (!Tok.is(AsmToken::Identifier))
2923 return MatchOperand_NoMatch;
2924
2925 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), RegKind::Scalar);
2926
2927 MCContext &Ctx = getContext();
2928 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2929 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2930 return MatchOperand_NoMatch;
2931
2932 SMLoc S = getLoc();
2933 Parser.Lex(); // Eat register
2934
2935 if (!parseOptionalToken(AsmToken::Comma)) {
2936 Operands.push_back(
2937 AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
2938 return MatchOperand_Success;
2939 }
2940
2941 parseOptionalToken(AsmToken::Hash);
2942
2943 if (Parser.getTok().isNot(AsmToken::Integer)) {
2944 Error(getLoc(), "index must be absent or #0");
2945 return MatchOperand_ParseFail;
2946 }
2947
2948 const MCExpr *ImmVal;
2949 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2950 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2951 Error(getLoc(), "index must be absent or #0");
2952 return MatchOperand_ParseFail;
2953 }
2954
2955 Operands.push_back(
2956 AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
2957 return MatchOperand_Success;
2958}
2959
2960 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
2961/// operand regardless of the mnemonic.
2962bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2963 bool invertCondCode) {
2964 MCAsmParser &Parser = getParser();
2965 // Check if the current operand has a custom associated parser, if so, try to
2966 // custom parse the operand, or fallback to the general approach.
2967 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2968 if (ResTy == MatchOperand_Success)
27
Taking false branch
55
Taking false branch
83
Assuming 'ResTy' is not equal to MatchOperand_Success
84
Taking false branch
112
Assuming 'ResTy' is not equal to MatchOperand_Success
113
Taking false branch
2969 return false;
2970 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2971 // there was a match, but an error occurred, in which case, just return that
2972 // the operand parsing failed.
2973 if (ResTy == MatchOperand_ParseFail)
28
Taking false branch
56
Taking false branch
85
Assuming 'ResTy' is not equal to MatchOperand_ParseFail
86
Taking false branch
114
Assuming 'ResTy' is not equal to MatchOperand_ParseFail
115
Taking false branch
2974 return true;
2975
2976 // Nothing custom, so do general case parsing.
2977 SMLoc S, E;
2978 switch (getLexer().getKind()) {
29
Control jumps to the 'default' case at line 2979
57
Control jumps to the 'default' case at line 2979
87
Control jumps to the 'default' case at line 2979
116
Control jumps to the 'default' case at line 2979
2979 default: {
2980 SMLoc S = getLoc();
2981 const MCExpr *Expr;
117
'Expr' declared without an initial value
2982 if (parseSymbolicImmVal(Expr))
30
Calling 'AArch64AsmParser::parseSymbolicImmVal'
44
Returning from 'AArch64AsmParser::parseSymbolicImmVal'
45
Taking false branch
58
Calling 'AArch64AsmParser::parseSymbolicImmVal'
72
Returning from 'AArch64AsmParser::parseSymbolicImmVal'
73
Taking false branch
88
Calling 'AArch64AsmParser::parseSymbolicImmVal'
102
Returning from 'AArch64AsmParser::parseSymbolicImmVal'
103
Taking false branch
118
Calling 'AArch64AsmParser::parseSymbolicImmVal'
143
Returning from 'AArch64AsmParser::parseSymbolicImmVal'
144
Assuming the condition is false
145
Taking false branch
2983 return Error(S, "invalid operand");
2984
2985 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2986 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
146
1st function call argument is an uninitialized value
2987 return false;
2988 }
2989 case AsmToken::LBrac: {
2990 SMLoc Loc = Parser.getTok().getLoc();
2991 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2992 getContext()));
2993 Parser.Lex(); // Eat '['
2994
2995 // There's no comma after a '[', so we can parse the next operand
2996 // immediately.
2997 return parseOperand(Operands, false, false);
2998 }
2999 case AsmToken::LCurly:
3000 return parseVectorList(Operands);
3001 case AsmToken::Identifier: {
3002 // If we're expecting a Condition Code operand, then just parse that.
3003 if (isCondCode)
3004 return parseCondCode(Operands, invertCondCode);
3005
3006 // If it's a register name, parse it.
3007 if (!parseRegister(Operands))
3008 return false;
3009
3010 // This could be an optional "shift" or "extend" operand.
3011 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3012 // We can only continue if no tokens were eaten.
3013 if (GotShift != MatchOperand_NoMatch)
3014 return GotShift;
3015
3016 // This was not a register so parse other operands that start with an
3017 // identifier (like labels) as expressions and create them as immediates.
3018 const MCExpr *IdVal;
3019 S = getLoc();
3020 if (getParser().parseExpression(IdVal))
3021 return true;
3022 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3023 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3024 return false;
3025 }
3026 case AsmToken::Integer:
3027 case AsmToken::Real:
3028 case AsmToken::Hash: {
3029 // #42 -> immediate.
3030 S = getLoc();
3031
3032 parseOptionalToken(AsmToken::Hash);
3033
3034 // Parse a negative sign
3035 bool isNegative = false;
3036 if (Parser.getTok().is(AsmToken::Minus)) {
3037 isNegative = true;
3038 // We need to consume this token only when we have a Real, otherwise
3039 // we let parseSymbolicImmVal take care of it
3040 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3041 Parser.Lex();
3042 }
3043
3044 // The only Real that should come through here is a literal #0.0 for
3045 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3046 // so convert the value.
3047 const AsmToken &Tok = Parser.getTok();
3048 if (Tok.is(AsmToken::Real)) {
3049 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3050 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3051 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3052 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3053 Mnemonic != "fcmlt")
3054 return TokError("unexpected floating point literal");
3055 else if (IntVal != 0 || isNegative)
3056 return TokError("expected floating-point constant #0.0");
3057 Parser.Lex(); // Eat the token.
3058
3059 Operands.push_back(
3060 AArch64Operand::CreateToken("#0", false, S, getContext()));
3061 Operands.push_back(
3062 AArch64Operand::CreateToken(".0", false, S, getContext()));
3063 return false;
3064 }
3065
3066 const MCExpr *ImmVal;
3067 if (parseSymbolicImmVal(ImmVal))
3068 return true;
3069
3070 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3071 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3072 return false;
3073 }
3074 case AsmToken::Equal: {
3075 SMLoc Loc = getLoc();
3076 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3077 return TokError("unexpected token in operand");
3078 Parser.Lex(); // Eat '='
3079 const MCExpr *SubExprVal;
3080 if (getParser().parseExpression(SubExprVal))
3081 return true;
3082
3083 if (Operands.size() < 2 ||
3084 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3085 return Error(Loc, "Only valid when first operand is register");
3086
3087 bool IsXReg =
3088 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3089 Operands[1]->getReg());
3090
3091 MCContext& Ctx = getContext();
3092 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3093 // If the op is an imm and can fit into a mov, then replace ldr with mov.
3094 if (isa<MCConstantExpr>(SubExprVal)) {
3095 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3096 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3097 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3098 ShiftAmt += 16;
3099 Imm >>= 16;
3100 }
3101 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3102 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3103 Operands.push_back(AArch64Operand::CreateImm(
3104 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3105 if (ShiftAmt)
3106 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3107 ShiftAmt, true, S, E, Ctx));
3108 return false;
3109 }
3110 APInt Simm = APInt(64, Imm << ShiftAmt);
3111 // check if the immediate is an unsigned or signed 32-bit int for W regs
3112 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3113 return Error(Loc, "Immediate too large for register");
3114 }
3115 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3116 const MCExpr *CPLoc =
3117 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3118 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3119 return false;
3120 }
3121 }
3122}
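
parseOperand's default case is where the reported defect sits: 'Expr' (line 2981) is consumed at line 2986 on the path where parseSymbolicImmVal is assumed to return false without having stored anything. One possible local hardening, shown as a sketch against the lines above (an assumption about a fix, not necessarily what upstream chose):

  // default: case of parseOperand, with the pointer made defined on every path
  //   const MCExpr *Expr = nullptr;            // was: const MCExpr *Expr;
  //   if (parseSymbolicImmVal(Expr) || !Expr)  // additionally reject a null result
  //     return Error(S, "invalid operand");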
3123
3124/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3125/// operands.
3126bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3127 StringRef Name, SMLoc NameLoc,
3128 OperandVector &Operands) {
3129 MCAsmParser &Parser = getParser();
3130 Name = StringSwitch<StringRef>(Name.lower())
3131 .Case("beq", "b.eq")
3132 .Case("bne", "b.ne")
3133 .Case("bhs", "b.hs")
3134 .Case("bcs", "b.cs")
3135 .Case("blo", "b.lo")
3136 .Case("bcc", "b.cc")
3137 .Case("bmi", "b.mi")
3138 .Case("bpl", "b.pl")
3139 .Case("bvs", "b.vs")
3140 .Case("bvc", "b.vc")
3141 .Case("bhi", "b.hi")
3142 .Case("bls", "b.ls")
3143 .Case("bge", "b.ge")
3144 .Case("blt", "b.lt")
3145 .Case("bgt", "b.gt")
3146 .Case("ble", "b.le")
3147 .Case("bal", "b.al")
3148 .Case("bnv", "b.nv")
3149 .Default(Name);
3150
3151 // First check for the AArch64-specific .req directive.
3152 if (Parser.getTok().is(AsmToken::Identifier) &&
1
Taking false branch
3153 Parser.getTok().getIdentifier() == ".req") {
3154 parseDirectiveReq(Name, NameLoc);
3155 // We always return 'error' for this, as we're done with this
3156 // statement and don't need to match the instruction.
3157 return true;
3158 }
3159
3160 // Create the leading tokens for the mnemonic, split by '.' characters.
3161 size_t Start = 0, Next = Name.find('.');
3162 StringRef Head = Name.slice(Start, Next);
3163
3164 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3165 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
2
Assuming the condition is false
3
Assuming the condition is false
4
Assuming the condition is false
5
Assuming the condition is false
6
Taking false branch
3166 return parseSysAlias(Head, NameLoc, Operands);
3167
3168 Operands.push_back(
3169 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3170 Mnemonic = Head;
3171
3172 // Handle condition codes for a branch mnemonic
3173 if (Head == "b" && Next != StringRef::npos) {
7
Assuming the condition is false
8
Taking false branch
3174 Start = Next;
3175 Next = Name.find('.', Start + 1);
3176 Head = Name.slice(Start + 1, Next);
3177
3178 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3179 (Head.data() - Name.data()));
3180 AArch64CC::CondCode CC = parseCondCodeString(Head);
3181 if (CC == AArch64CC::Invalid)
3182 return Error(SuffixLoc, "invalid condition code");
3183 Operands.push_back(
3184 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3185 Operands.push_back(
3186 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3187 }
3188
3189 // Add the remaining tokens in the mnemonic.
3190 while (Next != StringRef::npos) {
9
Assuming 'Next' is equal to 'npos'
10
Loop condition is false. Execution continues on line 3203
3191 Start = Next;
3192 Next = Name.find('.', Start + 1);
3193 Head = Name.slice(Start, Next);
3194 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3195 (Head.data() - Name.data()) + 1);
3196 Operands.push_back(
3197 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3198 }
3199
3200 // Conditional compare instructions have a Condition Code operand, which needs
3201 // to be parsed and an immediate operand created.
3202 bool condCodeFourthOperand =
3203 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
11
Assuming the condition is false
12
Assuming the condition is false
13
Assuming the condition is false
3204 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
14
Assuming the condition is false
15
Assuming the condition is false
16
Assuming the condition is false
3205 Head == "csinc" || Head == "csinv" || Head == "csneg");
17
Assuming the condition is false
18
Assuming the condition is false
3206
3207 // These instructions are aliases to some of the conditional select
3208 // instructions. However, the condition code is inverted in the aliased
3209 // instruction.
3210 //
3211 // FIXME: Is this the correct way to handle these? Or should the parser
3212 // generate the aliased instructions directly?
3213 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
19
Assuming the condition is false
3214 bool condCodeThirdOperand =
3215 (Head == "cinc" || Head == "cinv" || Head == "cneg");
20
Assuming the condition is false
21
Assuming the condition is false
3216
3217 // Read the remaining operands.
3218 if (getLexer().isNot(AsmToken::EndOfStatement)) {
22
Taking true branch
3219 // Read the first operand.
3220 if (parseOperand(Operands, false, false)) {
23
Taking false branch
3221 return true;
3222 }
3223
3224 unsigned N = 2;
3225 while (parseOptionalToken(AsmToken::Comma)) {
24
Loop condition is true. Entering loop body
52
Loop condition is true. Entering loop body
80
Loop condition is true. Entering loop body
110
Loop condition is true. Entering loop body
3226 // Parse and remember the operand.
3227 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
26
Calling 'AArch64AsmParser::parseOperand'
46
Returning from 'AArch64AsmParser::parseOperand'
47
Taking false branch
54
Calling 'AArch64AsmParser::parseOperand'
74
Returning from 'AArch64AsmParser::parseOperand'
75
Taking false branch
81
Assuming the condition is false
82
Calling 'AArch64AsmParser::parseOperand'
104
Returning from 'AArch64AsmParser::parseOperand'
105
Taking false branch
111
Calling 'AArch64AsmParser::parseOperand'
3228 (N == 3 && condCodeThirdOperand) ||
53
Assuming 'condCodeThirdOperand' is 0
3229 (N == 2 && condCodeSecondOperand),
3230 condCodeSecondOperand || condCodeThirdOperand)) {
25
Assuming 'condCodeSecondOperand' is 0
3231 return true;
3232 }
3233
3234 // After successfully parsing some operands there are two special cases to
3235 // consider (i.e. notional operands not separated by commas). Both are due
3236 // to memory specifiers:
3237 // + An RBrac will end an address for load/store/prefetch
3238 // + An '!' will indicate a pre-indexed operation.
3239 //
3240 // It's someone else's responsibility to make sure these tokens are sane
3241 // in the given context!
3242
3243 SMLoc RLoc = Parser.getTok().getLoc();
3244 if (parseOptionalToken(AsmToken::RBrac))
48
Assuming the condition is false
49
Taking false branch
76
Assuming the condition is false
77
Taking false branch
106
Assuming the condition is false
107
Taking false branch
3245 Operands.push_back(
3246 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3247 SMLoc ELoc = Parser.getTok().getLoc();
3248 if (parseOptionalToken(AsmToken::Exclaim))
50
Assuming the condition is false
51
Taking false branch
78
Assuming the condition is false
79
Taking false branch
108
Assuming the condition is false
109
Taking false branch
3249 Operands.push_back(
3250 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3251
3252 ++N;
3253 }
3254 }
3255
3256 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3257 return true;
3258
3259 return false;
3260}
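
Illustrative inputs for the special cases handled above (generic register names and labels; shown only to make the operand positions concrete):

  beq   label            // canonicalised to "b.eq label" before the mnemonic is split at '.'
  cset  w0, ne           // condCodeSecondOperand: 2nd operand is a condition code (inverted)
  cinc  w0, w1, lt       // condCodeThirdOperand: 3rd operand is a condition code (inverted)
  ccmp  w0, w1, #0, eq   // condCodeFourthOperand: 4th operand is a condition code (not inverted)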
3261
3262// FIXME: This entire function is a giant hack to provide us with decent
3263// operand range validation/diagnostics until TableGen/MC can be extended
3264// to support autogeneration of this kind of validation.
3265bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3266 SmallVectorImpl<SMLoc> &Loc) {
3267 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3268 // Check for indexed addressing modes w/ the base register being the
3269 // same as a destination/source register or pair load where
3270 // the Rt == Rt2. All of those are undefined behaviour.
3271 switch (Inst.getOpcode()) {
3272 case AArch64::LDPSWpre:
3273 case AArch64::LDPWpost:
3274 case AArch64::LDPWpre:
3275 case AArch64::LDPXpost:
3276 case AArch64::LDPXpre: {
3277 unsigned Rt = Inst.getOperand(1).getReg();
3278 unsigned Rt2 = Inst.getOperand(2).getReg();
3279 unsigned Rn = Inst.getOperand(3).getReg();
3280 if (RI->isSubRegisterEq(Rn, Rt))
3281 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3282 "is also a destination");
3283 if (RI->isSubRegisterEq(Rn, Rt2))
3284 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3285 "is also a destination");
3286 LLVM_FALLTHROUGH;
3287 }
3288 case AArch64::LDPDi:
3289 case AArch64::LDPQi:
3290 case AArch64::LDPSi:
3291 case AArch64::LDPSWi:
3292 case AArch64::LDPWi:
3293 case AArch64::LDPXi: {
3294 unsigned Rt = Inst.getOperand(0).getReg();
3295 unsigned Rt2 = Inst.getOperand(1).getReg();
3296 if (Rt == Rt2)
3297 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3298 break;
3299 }
3300 case AArch64::LDPDpost:
3301 case AArch64::LDPDpre:
3302 case AArch64::LDPQpost:
3303 case AArch64::LDPQpre:
3304 case AArch64::LDPSpost:
3305 case AArch64::LDPSpre:
3306 case AArch64::LDPSWpost: {
3307 unsigned Rt = Inst.getOperand(1).getReg();
3308 unsigned Rt2 = Inst.getOperand(2).getReg();
3309 if (Rt == Rt2)
3310 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3311 break;
3312 }
3313 case AArch64::STPDpost:
3314 case AArch64::STPDpre:
3315 case AArch64::STPQpost:
3316 case AArch64::STPQpre:
3317 case AArch64::STPSpost:
3318 case AArch64::STPSpre:
3319 case AArch64::STPWpost:
3320 case AArch64::STPWpre:
3321 case AArch64::STPXpost:
3322 case AArch64::STPXpre: {
3323 unsigned Rt = Inst.getOperand(1).getReg();
3324 unsigned Rt2 = Inst.getOperand(2).getReg();
3325 unsigned Rn = Inst.getOperand(3).getReg();
3326 if (RI->isSubRegisterEq(Rn, Rt))
3327 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3328 "is also a source");
3329 if (RI->isSubRegisterEq(Rn, Rt2))
3330 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3331 "is also a source");
3332 break;
3333 }
3334 case AArch64::LDRBBpre:
3335 case AArch64::LDRBpre:
3336 case AArch64::LDRHHpre:
3337 case AArch64::LDRHpre:
3338 case AArch64::LDRSBWpre:
3339 case AArch64::LDRSBXpre:
3340 case AArch64::LDRSHWpre:
3341 case AArch64::LDRSHXpre:
3342 case AArch64::LDRSWpre:
3343 case AArch64::LDRWpre:
3344 case AArch64::LDRXpre:
3345 case AArch64::LDRBBpost:
3346 case AArch64::LDRBpost:
3347 case AArch64::LDRHHpost:
3348 case AArch64::LDRHpost:
3349 case AArch64::LDRSBWpost:
3350 case AArch64::LDRSBXpost:
3351 case AArch64::LDRSHWpost:
3352 case AArch64::LDRSHXpost:
3353 case AArch64::LDRSWpost:
3354 case AArch64::LDRWpost:
3355 case AArch64::LDRXpost: {
3356 unsigned Rt = Inst.getOperand(1).getReg();
3357 unsigned Rn = Inst.getOperand(2).getReg();
3358 if (RI->isSubRegisterEq(Rn, Rt))
3359 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3360 "is also a source");
3361 break;
3362 }
3363 case AArch64::STRBBpost:
3364 case AArch64::STRBpost:
3365 case AArch64::STRHHpost:
3366 case AArch64::STRHpost:
3367 case AArch64::STRWpost:
3368 case AArch64::STRXpost:
3369 case AArch64::STRBBpre:
3370 case AArch64::STRBpre:
3371 case AArch64::STRHHpre:
3372 case AArch64::STRHpre:
3373 case AArch64::STRWpre:
3374 case AArch64::STRXpre: {
3375 unsigned Rt = Inst.getOperand(1).getReg();
3376 unsigned Rn = Inst.getOperand(2).getReg();
3377 if (RI->isSubRegisterEq(Rn, Rt))
3378 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3379 "is also a source");
3380 break;
3381 }
3382 }
3383
3384 // Now check immediate ranges. Separate from the above as there is overlap
3385 // in the instructions being checked and this keeps the nested conditionals
3386 // to a minimum.
3387 switch (Inst.getOpcode()) {
3388 case AArch64::ADDSWri:
3389 case AArch64::ADDSXri:
3390 case AArch64::ADDWri:
3391 case AArch64::ADDXri:
3392 case AArch64::SUBSWri:
3393 case AArch64::SUBSXri:
3394 case AArch64::SUBWri:
3395 case AArch64::SUBXri: {
3396 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3397 // some slight duplication here.
3398 if (Inst.getOperand(2).isExpr()) {
3399 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3400 AArch64MCExpr::VariantKind ELFRefKind;
3401 MCSymbolRefExpr::VariantKind DarwinRefKind;
3402 int64_t Addend;
3403 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3404
3405 // Only allow these with ADDXri.
3406 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3407 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3408 Inst.getOpcode() == AArch64::ADDXri)
3409 return false;
3410
3411 // Only allow these with ADDXri/ADDWri
3412 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3413 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3414 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3415 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3416 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3417 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3418 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3419 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3420 (Inst.getOpcode() == AArch64::ADDXri ||
3421 Inst.getOpcode() == AArch64::ADDWri))
3422 return false;
3423
3424 // Don't allow symbol refs in the immediate field otherwise.
3425 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3426 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3427 // 'cmp w0, borked')
3428 return Error(Loc.back(), "invalid immediate expression");
3429 }
3430 // We don't validate more complex expressions here
3431 }
3432 return false;
3433 }
3434 default:
3435 return false;
3436 }
3437}
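
A few illustrative inputs this validation rejects (register choices are arbitrary; 'borked' is the placeholder symbol used in the comment above):

  ldp x0, x0, [x1]        // "unpredictable LDP instruction, Rt2==Rt"
  ldp x2, x3, [x2], #16   // writeback base is also a destination
  str w4, [x4, #8]!       // writeback base is also a source
  add w0, w1, borked      // "invalid immediate expression" (bare symbol reference)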
3438
3439static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
3440 unsigned VariantID = 0);
3441
3442bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
3443 OperandVector &Operands) {
3444 switch (ErrCode) {
3445 case Match_MissingFeature:
3446 return Error(Loc,
3447 "instruction requires a CPU feature not currently enabled");
3448 case Match_InvalidOperand:
3449 return Error(Loc, "invalid operand for instruction");
3450 case Match_InvalidSuffix:
3451 return Error(Loc, "invalid type suffix for instruction");
3452 case Match_InvalidCondCode:
3453 return Error(Loc, "expected AArch64 condition code");
3454 case Match_AddSubRegExtendSmall:
3455 return Error(Loc,
3456 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3457 case Match_AddSubRegExtendLarge:
3458 return Error(Loc,
3459 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3460 case Match_AddSubSecondSource:
3461 return Error(Loc,
3462 "expected compatible register, symbol or integer in range [0, 4095]");
3463 case Match_LogicalSecondSource:
3464 return Error(Loc, "expected compatible register or logical immediate");
3465 case Match_InvalidMovImm32Shift:
3466 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3467 case Match_InvalidMovImm64Shift:
3468 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3469 case Match_AddSubRegShift32:
3470 return Error(Loc,
3471 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3472 case Match_AddSubRegShift64:
3473 return Error(Loc,
3474 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3475 case Match_InvalidFPImm:
3476 return Error(Loc,
3477 "expected compatible register or floating-point constant");
3478 case Match_InvalidMemoryIndexedSImm9:
3479 return Error(Loc, "index must be an integer in range [-256, 255].");
3480 case Match_InvalidMemoryIndexedSImm10:
3481 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
3482 case Match_InvalidMemoryIndexed4SImm7:
3483 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3484 case Match_InvalidMemoryIndexed8SImm7:
3485 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3486 case Match_InvalidMemoryIndexed16SImm7:
3487 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3488 case Match_InvalidMemoryWExtend8:
3489 return Error(Loc,
3490 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3491 case Match_InvalidMemoryWExtend16:
3492 return Error(Loc,
3493 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3494 case Match_InvalidMemoryWExtend32:
3495 return Error(Loc,
3496 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3497 case Match_InvalidMemoryWExtend64:
3498 return Error(Loc,
3499 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3500 case Match_InvalidMemoryWExtend128:
3501 return Error(Loc,
3502 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3503 case Match_InvalidMemoryXExtend8:
3504 return Error(Loc,
3505 "expected 'lsl' or 'sxtx' with optional shift of #0");
3506 case Match_InvalidMemoryXExtend16:
3507 return Error(Loc,
3508 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3509 case Match_InvalidMemoryXExtend32:
3510 return Error(Loc,
3511 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3512 case Match_InvalidMemoryXExtend64:
3513 return Error(Loc,
3514 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3515 case Match_InvalidMemoryXExtend128:
3516 return Error(Loc,
3517 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3518 case Match_InvalidMemoryIndexed1:
3519 return Error(Loc, "index must be an integer in range [0, 4095].");
3520 case Match_InvalidMemoryIndexed2:
3521 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3522 case Match_InvalidMemoryIndexed4:
3523 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3524 case Match_InvalidMemoryIndexed8:
3525 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3526 case Match_InvalidMemoryIndexed16:
3527 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3528 case Match_InvalidImm0_1:
3529 return Error(Loc, "immediate must be an integer in range [0, 1].");
3530 case Match_InvalidImm0_7:
3531 return Error(Loc, "immediate must be an integer in range [0, 7].");
3532 case Match_InvalidImm0_15:
3533 return Error(Loc, "immediate must be an integer in range [0, 15].");
3534 case Match_InvalidImm0_31:
3535 return Error(Loc, "immediate must be an integer in range [0, 31].");
3536 case Match_InvalidImm0_63:
3537 return Error(Loc, "immediate must be an integer in range [0, 63].");
3538 case Match_InvalidImm0_127:
3539 return Error(Loc, "immediate must be an integer in range [0, 127].");
3540 case Match_InvalidImm0_255:
3541 return Error(Loc, "immediate must be an integer in range [0, 255].");
3542 case Match_InvalidImm0_65535:
3543 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3544 case Match_InvalidImm1_8:
3545 return Error(Loc, "immediate must be an integer in range [1, 8].");
3546 case Match_InvalidImm1_16:
3547 return Error(Loc, "immediate must be an integer in range [1, 16].");
3548 case Match_InvalidImm1_32:
3549 return Error(Loc, "immediate must be an integer in range [1, 32].");
3550 case Match_InvalidImm1_64:
3551 return Error(Loc, "immediate must be an integer in range [1, 64].");
3552 case Match_InvalidIndex1:
3553 return Error(Loc, "expected lane specifier '[1]'");
3554 case Match_InvalidIndexB:
3555 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3556 case Match_InvalidIndexH:
3557 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3558 case Match_InvalidIndexS:
3559 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3560 case Match_InvalidIndexD:
3561 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3562 case Match_InvalidLabel:
3563 return Error(Loc, "expected label or encodable integer pc offset");
3564 case Match_MRS:
3565 return Error(Loc, "expected readable system register");
3566 case Match_MSR:
3567 return Error(Loc, "expected writable system register or pstate");
3568 case Match_InvalidComplexRotationEven:
3569 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
3570 case Match_InvalidComplexRotationOdd:
3571 return Error(Loc, "complex rotation must be 90 or 270.");
3572 case Match_MnemonicFail: {
3573 std::string Suggestion = AArch64MnemonicSpellCheck(
3574 ((AArch64Operand &)*Operands[0]).getToken(),
3575 ComputeAvailableFeatures(STI->getFeatureBits()));
3576 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
3577 }
3578 default:
3579 llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-6.0~svn321108/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3579)
;
3580 }
3581}
3582
3583static const char *getSubtargetFeatureName(uint64_t Val);
3584
3585bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3586 OperandVector &Operands,
3587 MCStreamer &Out,
3588 uint64_t &ErrorInfo,
3589 bool MatchingInlineAsm) {
3590 assert(!Operands.empty() && "Unexpect empty operand list!");
3591 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3592 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3593
3594 StringRef Tok = Op.getToken();
3595 unsigned NumOperands = Operands.size();
3596
3597 if (NumOperands == 4 && Tok == "lsl") {
3598 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3599 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3600 if (Op2.isReg() && Op3.isImm()) {
3601 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3602 if (Op3CE) {
3603 uint64_t Op3Val = Op3CE->getValue();
3604 uint64_t NewOp3Val = 0;
3605 uint64_t NewOp4Val = 0;
3606 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3607 Op2.getReg())) {
3608 NewOp3Val = (32 - Op3Val) & 0x1f;
3609 NewOp4Val = 31 - Op3Val;
3610 } else {
3611 NewOp3Val = (64 - Op3Val) & 0x3f;
3612 NewOp4Val = 63 - Op3Val;
3613 }
3614
3615 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3616 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3617
3618 Operands[0] = AArch64Operand::CreateToken(
3619 "ubfm", false, Op.getStartLoc(), getContext());
3620 Operands.push_back(AArch64Operand::CreateImm(
3621 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3622 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3623 Op3.getEndLoc(), getContext());
3624 }
3625 }
3626 } else if (NumOperands == 4 && Tok == "bfc") {
3627 // FIXME: Horrible hack to handle BFC->BFM alias.
3628 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3629 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3630 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3631
3632 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3633 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3634 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3635
3636 if (LSBCE && WidthCE) {
3637 uint64_t LSB = LSBCE->getValue();
3638 uint64_t Width = WidthCE->getValue();
3639
3640 uint64_t RegWidth = 0;
3641 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3642 Op1.getReg()))
3643 RegWidth = 64;
3644 else
3645 RegWidth = 32;
3646
3647 if (LSB >= RegWidth)
3648 return Error(LSBOp.getStartLoc(),
3649 "expected integer in range [0, 31]");
3650 if (Width < 1 || Width > RegWidth)
3651 return Error(WidthOp.getStartLoc(),
3652 "expected integer in range [1, 32]");
3653
3654 uint64_t ImmR = 0;
3655 if (RegWidth == 32)
3656 ImmR = (32 - LSB) & 0x1f;
3657 else
3658 ImmR = (64 - LSB) & 0x3f;
3659
3660 uint64_t ImmS = Width - 1;
3661
3662 if (ImmR != 0 && ImmS >= ImmR)
3663 return Error(WidthOp.getStartLoc(),
3664 "requested insert overflows register");
3665
3666 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3667 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3668 Operands[0] = AArch64Operand::CreateToken(
3669 "bfm", false, Op.getStartLoc(), getContext());
3670 Operands[2] = AArch64Operand::CreateReg(
3671 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
3672 SMLoc(), SMLoc(), getContext());
3673 Operands[3] = AArch64Operand::CreateImm(
3674 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3675 Operands.emplace_back(
3676 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3677 WidthOp.getEndLoc(), getContext()));
3678 }
3679 }
3680 } else if (NumOperands == 5) {
3681 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3682 // UBFIZ -> UBFM aliases.
3683 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3684 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3685 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3686 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3687
3688 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3689 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3690 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3691
3692 if (Op3CE && Op4CE) {
3693 uint64_t Op3Val = Op3CE->getValue();
3694 uint64_t Op4Val = Op4CE->getValue();
3695
3696 uint64_t RegWidth = 0;
3697 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3698 Op1.getReg()))
3699 RegWidth = 64;
3700 else
3701 RegWidth = 32;
3702
3703 if (Op3Val >= RegWidth)
3704 return Error(Op3.getStartLoc(),
3705 "expected integer in range [0, 31]");
3706 if (Op4Val < 1 || Op4Val > RegWidth)
3707 return Error(Op4.getStartLoc(),
3708 "expected integer in range [1, 32]");
3709
3710 uint64_t NewOp3Val = 0;
3711 if (RegWidth == 32)
3712 NewOp3Val = (32 - Op3Val) & 0x1f;
3713 else
3714 NewOp3Val = (64 - Op3Val) & 0x3f;
3715
3716 uint64_t NewOp4Val = Op4Val - 1;
3717
3718 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3719 return Error(Op4.getStartLoc(),
3720 "requested insert overflows register");
3721
3722 const MCExpr *NewOp3 =
3723 MCConstantExpr::create(NewOp3Val, getContext());
3724 const MCExpr *NewOp4 =
3725 MCConstantExpr::create(NewOp4Val, getContext());
3726 Operands[3] = AArch64Operand::CreateImm(
3727 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3728 Operands[4] = AArch64Operand::CreateImm(
3729 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3730 if (Tok == "bfi")
3731 Operands[0] = AArch64Operand::CreateToken(
3732 "bfm", false, Op.getStartLoc(), getContext());
3733 else if (Tok == "sbfiz")
3734 Operands[0] = AArch64Operand::CreateToken(
3735 "sbfm", false, Op.getStartLoc(), getContext());
3736 else if (Tok == "ubfiz")
3737 Operands[0] = AArch64Operand::CreateToken(
3738 "ubfm", false, Op.getStartLoc(), getContext());
3739 else
3740 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-6.0~svn321108/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3740)
;
3741 }
3742 }
3743
3744 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3745 // UBFX -> UBFM aliases.
3746 } else if (NumOperands == 5 &&
3747 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3748 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3749 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3750 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3751
3752 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3753 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3754 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3755
3756 if (Op3CE && Op4CE) {
3757 uint64_t Op3Val = Op3CE->getValue();
3758 uint64_t Op4Val = Op4CE->getValue();
3759
3760 uint64_t RegWidth = 0;
3761 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3762 Op1.getReg()))
3763 RegWidth = 64;
3764 else
3765 RegWidth = 32;
3766
3767 if (Op3Val >= RegWidth)
3768 return Error(Op3.getStartLoc(),
3769 "expected integer in range [0, 31]");
3770 if (Op4Val < 1 || Op4Val > RegWidth)
3771 return Error(Op4.getStartLoc(),
3772 "expected integer in range [1, 32]");
3773
3774 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3775
3776 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3777 return Error(Op4.getStartLoc(),
3778 "requested extract overflows register");
3779
3780 const MCExpr *NewOp4 =
3781 MCConstantExpr::create(NewOp4Val, getContext());
3782 Operands[4] = AArch64Operand::CreateImm(
3783 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3784 if (Tok == "bfxil")
3785 Operands[0] = AArch64Operand::CreateToken(
3786 "bfm", false, Op.getStartLoc(), getContext());
3787 else if (Tok == "sbfx")
3788 Operands[0] = AArch64Operand::CreateToken(
3789 "sbfm", false, Op.getStartLoc(), getContext());
3790 else if (Tok == "ubfx")
3791 Operands[0] = AArch64Operand::CreateToken(
3792 "ubfm", false, Op.getStartLoc(), getContext());
3793 else
3794 llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?"
, "/build/llvm-toolchain-snapshot-6.0~svn321108/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp"
, 3794)
;
3795 }
3796 }
3797 }
3798 }
3799
3800 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
3801 // instruction for FP registers correctly in some rare circumstances. Convert
3802 // it to a safe instruction and warn (because silently changing someone's
3803 // assembly is rude).
3804 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
3805 NumOperands == 4 && Tok == "movi") {
3806 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3807 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3808 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3809 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
3810 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
3811 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
3812 if (Suffix.lower() == ".2d" &&
3813 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
3814 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
3815 " correctly on this CPU, converting to equivalent movi.16b");
3816 // Switch the suffix to .16b.
3817 unsigned Idx = Op1.isToken() ? 1 : 2;
3818 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
3819 getContext());
3820 }
3821 }
3822 }
3823
3824 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3825 // InstAlias can't quite handle this since the reg classes aren't
3826 // subclasses.
3827 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3828 // The source register can be Wn here, but the matcher expects a
3829 // GPR64. Twiddle it here if necessary.
3830 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3831 if (Op.isReg()) {
3832 unsigned Reg = getXRegFromWReg(Op.getReg());
3833 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3834 Op.getStartLoc(), Op.getEndLoc(),
3835 getContext());
3836 }
3837 }
3838 // FIXME: Likewise for sxt[bh] with an Xd dst operand
3839 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3840 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3841 if (Op.isReg() &&
3842 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3843 Op.getReg())) {
3844 // The source register can be Wn here, but the matcher expects a
3845 // GPR64. Twiddle it here if necessary.
3846 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3847 if (Op.isReg()) {
3848 unsigned Reg = getXRegFromWReg(Op.getReg());
3849 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3850 Op.getStartLoc(),
3851 Op.getEndLoc(), getContext());
3852 }
3853 }
3854 }
3855 // FIXME: Likewise for uxt[bh] with an Xd dst operand
3856 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3857 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3858 if (Op.isReg() &&
3859 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3860 Op.getReg())) {
3861 // The source register can be Wn here, but the matcher expects a
3862 // GPR32. Twiddle it here if necessary.
3863 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3864 if (Op.isReg()) {
3865 unsigned Reg = getWRegFromXReg(Op.getReg());
3866 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3867 Op.getStartLoc(),
3868 Op.getEndLoc(), getContext());
3869 }
3870 }
3871 }
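 // For illustration of the rewrites above (register names arbitrary):
 //   sxtb x0, w1  -> the source operand W1 is rewritten to X1, since the
 //                   matcher expects a GPR64 source for the Xd form;
 //   uxtb x0, w1  -> the destination X0 is rewritten to W0, since uxtb is
 //                   only defined with 32-bit operands.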
3872
3873 MCInst Inst;
3874 // First try to match against the secondary set of tables containing the
3875 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3876 unsigned MatchResult =
3877 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3878
3879 // If that fails, try against the alternate table containing long-form NEON:
3880 // "fadd v0.2s, v1.2s, v2.2s"
3881 if (MatchResult != Match_Success) {
3882 // But first, save the short-form match result: we can use it in case the
3883 // long-form match also fails.
3884 auto ShortFormNEONErrorInfo = ErrorInfo;
3885 auto ShortFormNEONMatchResult = MatchResult;
3886
3887 MatchResult =
3888 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3889
3890 // Now, both matches failed, and the long-form match failed on the mnemonic
3891 // suffix token operand. The short-form match failure is probably more
3892 // relevant: use it instead.
3893 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3894 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3895 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3896 MatchResult = ShortFormNEONMatchResult;
3897 ErrorInfo = ShortFormNEONErrorInfo;
3898 }
3899 }
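 // Illustrative inputs for the two passes above: the short-form spelling
 //   fadd.2s v0, v1, v2
 // is handled by the variant-1 tables tried first, while the long form
 //   fadd v0.2s, v1.2s, v2.2s
 // is handled by the variant-0 tables tried second.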
3900
3901 switch (MatchResult) {
3902 case Match_Success: {
3903 // Perform range checking and other semantic validations
3904 SmallVector<SMLoc, 8> OperandLocs;
3905 NumOperands = Operands.size();
3906 for (unsigned i = 1; i < NumOperands; ++i)
3907 OperandLocs.push_back(Operands[i]->getStartLoc());
3908 if (validateInstruction(Inst, OperandLocs))
3909 return true;
3910
3911 Inst.setLoc(IDLoc);
3912 Out.EmitInstruction(Inst, getSTI());
3913 return false;
3914 }
3915 case Match_MissingFeature: {
3916 assert(ErrorInfo && "Unknown missing feature!");
3917 // Special case the error message for the very common case where only
3918 // a single subtarget feature is missing (e.g. NEON).
3919 std::string Msg = "instruction requires:";
3920 uint64_t Mask = 1;
3921 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3922 if (ErrorInfo & Mask) {
3923 Msg += " ";
3924 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3925 }
3926 Mask <<= 1;
3927 }
3928 return Error(IDLoc, Msg);
3929 }
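  // Illustrative diagnostic from the loop above (assuming NEON is the only
  // missing feature bit): "instruction requires: neon".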
3930 case Match_MnemonicFail:
3931 return showMatchError(IDLoc, MatchResult, Operands);
3932 case Match_InvalidOperand: {
3933 SMLoc ErrorLoc = IDLoc;
3934
3935 if (ErrorInfo != ~0ULL) {
3936 if (ErrorInfo >= Operands.size())
3937 return Error(IDLoc, "too few operands for instruction",
3938 SMRange(IDLoc, getTok().getLoc()));
3939
3940 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3941 if (ErrorLoc == SMLoc())
3942 ErrorLoc = IDLoc;
3943 }
3944 // If the match failed on a suffix token operand, tweak the diagnostic
3945 // accordingly.
3946 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3947 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3948 MatchResult = Match_InvalidSuffix;
3949
3950 return showMatchError(ErrorLoc, MatchResult, Operands);
3951 }
3952 case Match_InvalidMemoryIndexed1:
3953 case Match_InvalidMemoryIndexed2:
3954 case Match_InvalidMemoryIndexed4:
3955 case Match_InvalidMemoryIndexed8:
3956 case Match_InvalidMemoryIndexed16:
3957 case Match_InvalidCondCode:
3958 case Match_AddSubRegExtendSmall:
3959 case Match_AddSubRegExtendLarge:
3960 case Match_AddSubSecondSource:
3961 case Match_LogicalSecondSource:
3962 case Match_AddSubRegShift32:
3963 case Match_AddSubRegShift64:
3964 case Match_InvalidMovImm32Shift:
3965 case Match_InvalidMovImm64Shift:
3966 case Match_InvalidFPImm:
3967 case Match_InvalidMemoryWExtend8:
3968 case Match_InvalidMemoryWExtend16:
3969 case Match_InvalidMemoryWExtend32:
3970 case Match_InvalidMemoryWExtend64:
3971 case Match_InvalidMemoryWExtend128:
3972 case Match_InvalidMemoryXExtend8:
3973 case Match_InvalidMemoryXExtend16:
3974 case Match_InvalidMemoryXExtend32:
3975 case Match_InvalidMemoryXExtend64:
3976 case Match_InvalidMemoryXExtend128:
3977 case Match_InvalidMemoryIndexed4SImm7:
3978 case Match_InvalidMemoryIndexed8SImm7:
3979 case Match_InvalidMemoryIndexed16SImm7:
3980 case Match_InvalidMemoryIndexedSImm9:
3981 case Match_InvalidMemoryIndexedSImm10:
3982 case Match_InvalidImm0_1:
3983 case Match_InvalidImm0_7:
3984 case Match_InvalidImm0_15:
3985 case Match_InvalidImm0_31:
3986 case Match_InvalidImm0_63:
3987 case Match_InvalidImm0_127:
3988 case Match_InvalidImm0_255:
3989 case Match_InvalidImm0_65535:
3990 case Match_InvalidImm1_8:
3991 case Match_InvalidImm1_16:
3992 case Match_InvalidImm1_32:
3993 case Match_InvalidImm1_64:
3994 case Match_InvalidIndex1:
3995 case Match_InvalidIndexB:
3996 case Match_InvalidIndexH:
3997 case Match_InvalidIndexS:
3998 case Match_InvalidIndexD:
3999 case Match_InvalidLabel:
4000 case Match_InvalidComplexRotationEven:
4001 case Match_InvalidComplexRotationOdd:
4002 case Match_MSR:
4003 case Match_MRS: {
4004 if (ErrorInfo >= Operands.size())
4005 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4006 // Any time we get here, there's nothing fancy to do. Just get the
4007 // operand SMLoc and display the diagnostic.
4008 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4009 if (ErrorLoc == SMLoc())
4010 ErrorLoc = IDLoc;
4011 return showMatchError(ErrorLoc, MatchResult, Operands);
4012 }
4013 }
4014
4015 llvm_unreachable("Implement any new match types added!");
4016}
4017
4018/// ParseDirective parses the AArch64-specific directives.
4019bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4020 const MCObjectFileInfo::Environment Format =
4021 getContext().getObjectFileInfo()->getObjectFileType();
4022 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4023 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4024
4025 StringRef IDVal = DirectiveID.getIdentifier();
4026 SMLoc Loc = DirectiveID.getLoc();
4027 if (IDVal == ".arch")
4028 parseDirectiveArch(Loc);
4029 else if (IDVal == ".cpu")
4030 parseDirectiveCPU(Loc);
4031 else if (IDVal == ".hword")
4032 parseDirectiveWord(2, Loc);
4033 else if (IDVal == ".word")
4034 parseDirectiveWord(4, Loc);
4035 else if (IDVal == ".xword")
4036 parseDirectiveWord(8, Loc);
4037 else if (IDVal == ".tlsdesccall")
4038 parseDirectiveTLSDescCall(Loc);
4039 else if (IDVal == ".ltorg" || IDVal == ".pool")
4040 parseDirectiveLtorg(Loc);
4041 else if (IDVal == ".unreq")
4042 parseDirectiveUnreq(Loc);
4043 else if (!IsMachO && !IsCOFF) {
4044 if (IDVal == ".inst")
4045 parseDirectiveInst(Loc);
4046 else
4047 return true;
4048 } else if (IDVal == MCLOHDirectiveName())
4049 parseDirectiveLOH(IDVal, Loc);
4050 else
4051 return true;
4052 return false;
4053}
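// An illustrative input fragment routed through the dispatch above (symbol
// and values arbitrary):
//   .arch armv8-a+crc
//   .hword 0x1234        // 2-byte value
//   .xword some_sym      // 8-byte value
//   .ltorg               // flush the current constant pool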
4054
4055static const struct {
4056 const char *Name;
4057 const FeatureBitset Features;
4058} ExtensionMap[] = {
4059 { "crc", {AArch64::FeatureCRC} },
4060 { "crypto", {AArch64::FeatureCrypto} },
4061 { "fp", {AArch64::FeatureFPARMv8} },
4062 { "simd", {AArch64::FeatureNEON} },
4063 { "ras", {AArch64::FeatureRAS} },
4064 { "lse", {AArch64::FeatureLSE} },
4065
4066 // FIXME: Unsupported extensions
4067 { "pan", {} },
4068 { "lor", {} },
4069 { "rdma", {} },
4070 { "profile", {} },
4071};
4072
4073/// parseDirectiveArch
4074/// ::= .arch token
4075bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4076 SMLoc ArchLoc = getLoc();
4077
4078 StringRef Arch, ExtensionString;
4079 std::tie(Arch, ExtensionString) =
4080 getParser().parseStringToEndOfStatement().trim().split('+');
4081
4082 AArch64::ArchKind ID = AArch64::parseArch(Arch);
4083 if (ID == AArch64::ArchKind::INVALID)
4084 return Error(ArchLoc, "unknown arch name");
4085
4086 if (parseToken(AsmToken::EndOfStatement))
4087 return true;
4088
4089 // Get the architecture and extension features.
4090 std::vector<StringRef> AArch64Features;
4091 AArch64::getArchFeatures(ID, AArch64Features);
4092 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
4093 AArch64Features);
4094
4095 MCSubtargetInfo &STI = copySTI();
4096 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4097 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4098
4099 SmallVector<StringRef, 4> RequestedExtensions;
4100 if (!ExtensionString.empty())
4101 ExtensionString.split(RequestedExtensions, '+');
4102
4103 FeatureBitset Features = STI.getFeatureBits();
4104 for (auto Name : RequestedExtensions) {
4105 bool EnableFeature = true;
4106
4107 if (Name.startswith_lower("no")) {
4108 EnableFeature = false;
4109 Name = Name.substr(2);
4110 }
4111
4112 for (const auto &Extension : ExtensionMap) {
4113 if (Extension.Name != Name)
4114 continue;
4115
4116 if (Extension.Features.none())
4117 report_fatal_error("unsupported architectural extension: " + Name);
4118
4119 FeatureBitset ToggleFeatures = EnableFeature
4120 ? (~Features & Extension.Features)
4121 : ( Features & Extension.Features);
4122 uint64_t Features =
4123 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4124 setAvailableFeatures(Features);
4125 break;
4126 }
4127 }
4128 return false;
4129}
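// Illustrative use of the parsing above: ".arch armv8.1-a+crypto+nosimd"
// selects the armv8.1-a base features, then walks the '+'-separated list,
// enabling crypto and disabling simd (a leading "no" flips EnableFeature).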
4130
4131static SMLoc incrementLoc(SMLoc L, int Offset) {
4132 return SMLoc::getFromPointer(L.getPointer() + Offset);
4133}
4134
4135/// parseDirectiveCPU
4136/// ::= .cpu id
4137bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4138 SMLoc CurLoc = getLoc();
4139
4140 StringRef CPU, ExtensionString;
4141 std::tie(CPU, ExtensionString) =
4142 getParser().parseStringToEndOfStatement().trim().split('+');
4143
4144 if (parseToken(AsmToken::EndOfStatement))
4145 return true;
4146
4147 SmallVector<StringRef, 4> RequestedExtensions;
4148 if (!ExtensionString.empty())
4149 ExtensionString.split(RequestedExtensions, '+');
4150
4151 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4152 // once that is tablegen'ed
4153 if (!getSTI().isCPUStringValid(CPU)) {
4154 Error(CurLoc, "unknown CPU name");
4155 return false;
4156 }
4157
4158 MCSubtargetInfo &STI = copySTI();
4159 STI.setDefaultFeatures(CPU, "");
4160 CurLoc = incrementLoc(CurLoc, CPU.size());
4161
4162 FeatureBitset Features = STI.getFeatureBits();
4163 for (auto Name : RequestedExtensions) {
4164 // Advance source location past '+'.
4165 CurLoc = incrementLoc(CurLoc, 1);
4166
4167 bool EnableFeature = true;
4168
4169 if (Name.startswith_lower("no")) {
4170 EnableFeature = false;
4171 Name = Name.substr(2);
4172 }
4173
4174 bool FoundExtension = false;
4175 for (const auto &Extension : ExtensionMap) {
4176 if (Extension.Name != Name)
4177 continue;
4178
4179 if (Extension.Features.none())
4180 report_fatal_error("unsupported architectural extension: " + Name);
4181
4182 FeatureBitset ToggleFeatures = EnableFeature
4183 ? (~Features & Extension.Features)
4184 : ( Features & Extension.Features);
4185 uint64_t Features =
4186 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4187 setAvailableFeatures(Features);
4188 FoundExtension = true;
4189
4190 break;
4191 }
4192
4193 if (!FoundExtension)
4194 Error(CurLoc, "unsupported architectural extension");
4195
4196 CurLoc = incrementLoc(CurLoc, Name.size());
4197 }
4198 return false;
4199}
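// Illustrative use: ".cpu cortex-a57+crc+nolse" loads the cortex-a57 default
// features, then enables crc and disables lse; a name not present in
// ExtensionMap is reported as "unsupported architectural extension" at the
// location tracked by CurLoc.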
4200
4201/// parseDirectiveWord
4202/// ::= .word [ expression (, expression)* ]
4203bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4204 auto parseOp = [&]() -> bool {
4205 const MCExpr *Value;
4206 if (getParser().parseExpression(Value))
4207 return true;
4208 getParser().getStreamer().EmitValue(Value, Size, L);
4209 return false;
4210 };
4211
4212 if (parseMany(parseOp))
4213 return true;
4214 return false;
4215}
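// Example directives handled by this helper (values arbitrary), with Size
// passed in from ParseDirective above:
//   .hword 0xbeef               // Size == 2
//   .word  some_sym + 4         // Size == 4; any MCExpr is accepted
//   .xword 0x1122334455667788   // Size == 8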
4216
4217/// parseDirectiveInst
4218/// ::= .inst opcode [, ...]
4219bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4220 if (getLexer().is(AsmToken::EndOfStatement))
4221 return Error(Loc, "expected expression following '.inst' directive");
4222
4223 auto parseOp = [&]() -> bool {
4224 SMLoc L = getLoc();
4225 const MCExpr *Expr;
4226 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4227 return true;
4228 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4229 if (check(!Value, L, "expected constant expression"))
4230 return true;
4231 getTargetStreamer().emitInst(Value->getValue());
4232 return false;
4233 };
4234
4235 if (parseMany(parseOp))
4236 return addErrorSuffix(" in '.inst' directive");
4237 return false;
4238}
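// Illustrative input: ".inst 0xd503201f, 0xd65f03c0" emits the two 32-bit
// encodings verbatim; a non-constant expression is rejected by the check
// above with "expected constant expression".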
4239
4240// parseDirectiveTLSDescCall:
4241// ::= .tlsdesccall symbol
4242bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4243 StringRef Name;
4244 if (check(getParser().parseIdentifier(Name), L,
4245 "expected symbol after directive") ||
4246 parseToken(AsmToken::EndOfStatement))
4247 return true;
4248
4249 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4250 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4251 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4252
4253 MCInst Inst;
4254 Inst.setOpcode(AArch64::TLSDESCCALL);
4255 Inst.addOperand(MCOperand::createExpr(Expr));
4256
4257 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4258 return false;
4259}
4260
4261/// ::= .loh <lohName | lohId> label1, ..., labelN
4262/// The number of arguments depends on the loh identifier.
4263bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4264 MCLOHType Kind;
4265 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4266 if (getParser().getTok().isNot(AsmToken::Integer))
4267 return TokError("expected an identifier or a number in directive");
4268 // We successfully get a numeric value for the identifier.
4269 // Check if it is valid.
4270 int64_t Id = getParser().getTok().getIntVal();
4271 if (Id <= -1U && !isValidMCLOHType(Id))
4272 return TokError("invalid numeric identifier in directive");
4273 Kind = (MCLOHType)Id;
4274 } else {
4275 StringRef Name = getTok().getIdentifier();
4276 // We successfully parse an identifier.
4277 // Check if it is a recognized one.
4278 int Id = MCLOHNameToId(Name);
4279
4280 if (Id == -1)
4281 return TokError("invalid identifier in directive");
4282 Kind = (MCLOHType)Id;
4283 }
4284 // Consume the identifier.
4285 Lex();
4286 // Get the number of arguments of this LOH.
4287 int NbArgs = MCLOHIdToNbArgs(Kind);
4288
4289 assert(NbArgs != -1 && "Invalid number of arguments");
4290
4291 SmallVector<MCSymbol *, 3> Args;
4292 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4293 StringRef Name;
4294 if (getParser().parseIdentifier(Name))
4295 return TokError("expected identifier in directive");
4296 Args.push_back(getContext().getOrCreateSymbol(Name));
4297
4298 if (Idx + 1 == NbArgs)
4299 break;
4300 if (parseToken(AsmToken::Comma,
4301 "unexpected token in '" + Twine(IDVal) + "' directive"))
4302 return true;
4303 }
4304 if (parseToken(AsmToken::EndOfStatement,
4305 "unexpected token in '" + Twine(IDVal) + "' directive"))
4306 return true;
4307
4308 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4309 return false;
4310}
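// Illustrative MachO input (label names arbitrary): ".loh AdrpAdd L1, L2"
// maps the name to its MCLOHType and then parses exactly
// MCLOHIdToNbArgs(Kind) comma-separated label arguments (two in this case).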
4311
4312/// parseDirectiveLtorg
4313/// ::= .ltorg | .pool
4314bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4315 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4316 return true;
4317 getTargetStreamer().emitCurrentConstantPool();
4318 return false;
4319}
4320
4321/// parseDirectiveReq
4322/// ::= name .req registername
4323bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4324 MCAsmParser &Parser = getParser();
4325 Parser.Lex(); // Eat the '.req' token.
4326 SMLoc SRegLoc = getLoc();
4327 int RegNum = tryParseRegister();
4328 RegKind RegisterKind = RegKind::Scalar;
4329
4330 if (RegNum == -1) {
4331 StringRef Kind;
4332 RegisterKind = RegKind::NeonVector;
4333 RegNum = tryMatchVectorRegister(Kind, false);
4334 if (!Kind.empty())
4335 return Error(SRegLoc, "vector register without type specifier expected");
4336 }
4337
4338 if (RegNum == -1) {
4339 StringRef Kind;
4340 RegisterKind = RegKind::SVEDataVector;
4341 OperandMatchResultTy Res =
4342 tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
4343
4344 if (Res == MatchOperand_ParseFail)
4345 return true;
4346
4347 if (Res == MatchOperand_Success && !Kind.empty())
4348 return Error(SRegLoc,
4349 "sve vector register without type specifier expected");
4350 }
4351
4352 if (RegNum == -1)
4353 return Error(SRegLoc, "register name or alias expected");
4354
4355 // Shouldn't be anything else.
4356 if (parseToken(AsmToken::EndOfStatement,
4357 "unexpected input in .req directive"))
4358 return true;
4359
4360 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
4361 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4362 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4363
4364 return false;
4365}
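// Illustrative aliases created by the directive above (names arbitrary):
//   base .req x4
//   acc  .req v0
// "base" is recorded as a Scalar alias and "acc" as a NeonVector alias in
// RegisterReqs; redefining an existing name to a different register only
// emits the "ignoring redefinition" warning.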
4366
4367/// parseDirectiveUnreq
4368/// ::= .unreq registername
4369bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4370 MCAsmParser &Parser = getParser();
4371 if (getTok().isNot(AsmToken::Identifier))
4372 return TokError("unexpected input in .unreq directive.");
4373 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4374 Parser.Lex(); // Eat the identifier.
4375 if (parseToken(AsmToken::EndOfStatement))
4376 return addErrorSuffix("in '.unreq' directive");
4377 return false;
4378}
4379
4380bool
4381AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4382 AArch64MCExpr::VariantKind &ELFRefKind,
4383 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4384 int64_t &Addend) {
4385 ELFRefKind = AArch64MCExpr::VK_INVALID;
4386 DarwinRefKind = MCSymbolRefExpr::VK_None;
4387 Addend = 0;
4388
4389 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4390 ELFRefKind = AE->getKind();
4391 Expr = AE->getSubExpr();
4392 }
4393
4394 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4395 if (SE) {
4396 // It's a simple symbol reference with no addend.
4397 DarwinRefKind = SE->getKind();
4398 return true;
4399 }
4400
4401 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4402 if (!BE)
4403 return false;
4404
4405 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4406 if (!SE)
4407 return false;
4408 DarwinRefKind = SE->getKind();
4409
4410 if (BE->getOpcode() != MCBinaryExpr::Add &&
4411 BE->getOpcode() != MCBinaryExpr::Sub)
4412 return false;
4413
4414 // See if the addend is a constant; otherwise there's more going
4415 // on here than we can deal with.
4416 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4417 if (!AddendExpr)
4418 return false;
4419
4420 Addend = AddendExpr->getValue();
4421 if (BE->getOpcode() == MCBinaryExpr::Sub)
4422 Addend = -Addend;
4423
4424 // It's some symbol reference + a constant addend, but really
4425 // shouldn't use both Darwin and ELF syntax.
4426 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4427 DarwinRefKind == MCSymbolRefExpr::VK_None;
4428}
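// Illustrative classifications (assuming the usual ELF and MachO operand
// syntax): ":lo12:sym + 8" yields ELFRefKind == AArch64MCExpr::VK_LO12 with
// Addend == 8, while a Darwin "sym@PAGE" yields DarwinRefKind ==
// MCSymbolRefExpr::VK_PAGE with Addend == 0.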
4429
4430/// Force static initialization.
4431extern "C" void LLVMInitializeAArch64AsmParser() {
4432 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
4433 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
4434 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
4435}
4436
4437#define GET_REGISTER_MATCHER
4438#define GET_SUBTARGET_FEATURE_NAME
4439#define GET_MATCHER_IMPLEMENTATION
4440#define GET_MNEMONIC_SPELL_CHECKER
4441#include "AArch64GenAsmMatcher.inc"
4442
4443// Define this matcher function after the auto-generated include so we
4444// have the match class enum definitions.
4445unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4446 unsigned Kind) {
4447 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4448 // If the kind is a token for a literal immediate, check if our asm
4449 // operand matches. This is for InstAliases which have a fixed-value
4450 // immediate in the syntax.
4451 int64_t ExpectedVal;
4452 switch (Kind) {
4453 default:
4454 return Match_InvalidOperand;
4455 case MCK__35_0:
4456 ExpectedVal = 0;
4457 break;
4458 case MCK__35_1:
4459 ExpectedVal = 1;
4460 break;
4461 case MCK__35_12:
4462 ExpectedVal = 12;
4463 break;
4464 case MCK__35_16:
4465 ExpectedVal = 16;
4466 break;
4467 case MCK__35_2:
4468 ExpectedVal = 2;
4469 break;
4470 case MCK__35_24:
4471 ExpectedVal = 24;
4472 break;
4473 case MCK__35_3:
4474 ExpectedVal = 3;
4475 break;
4476 case MCK__35_32:
4477 ExpectedVal = 32;
4478 break;
4479 case MCK__35_4:
4480 ExpectedVal = 4;
4481 break;
4482 case MCK__35_48:
4483 ExpectedVal = 48;
4484 break;
4485 case MCK__35_6:
4486 ExpectedVal = 6;
4487 break;
4488 case MCK__35_64:
4489 ExpectedVal = 64;
4490 break;
4491 case MCK__35_8:
4492 ExpectedVal = 8;
4493 break;
4494 }
4495 if (!Op.isImm())
4496 return Match_InvalidOperand;
4497 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4498 if (!CE)
4499 return Match_InvalidOperand;
4500 if (CE->getValue() == ExpectedVal)
4501 return Match_Success;
4502 return Match_InvalidOperand;
4503}
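// Illustrative match (the MCK__35_N names encode a literal '#', ASCII 35):
// for an InstAlias whose syntax contains the fixed token "#16", Kind is
// MCK__35_16, so an immediate operand backed by an MCConstantExpr of value 16
// returns Match_Success and anything else returns Match_InvalidOperand.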
4504
4505OperandMatchResultTy
4506AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4507
4508 SMLoc S = getLoc();
4509
4510 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4511 Error(S, "expected register");
4512 return MatchOperand_ParseFail;
4513 }
4514
4515 int FirstReg = tryParseRegister();
4516 if (FirstReg == -1) {
4517 return MatchOperand_ParseFail;
4518 }
4519 const MCRegisterClass &WRegClass =
4520 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4521 const MCRegisterClass &XRegClass =
4522 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4523
4524 bool isXReg = XRegClass.contains(FirstReg),
4525 isWReg = WRegClass.contains(FirstReg);
4526 if (!isXReg && !isWReg) {
4527 Error(S, "expected first even register of a "
4528 "consecutive same-size even/odd register pair");
4529 return MatchOperand_ParseFail;
4530 }
4531
4532 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4533 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4534
4535 if (FirstEncoding & 0x1) {
4536 Error(S, "expected first even register of a "
4537 "consecutive same-size even/odd register pair");
4538 return MatchOperand_ParseFail;
4539 }
4540
4541 SMLoc M = getLoc();
4542 if (getParser().getTok().isNot(AsmToken::Comma)) {
4543 Error(M, "expected comma");
4544 return MatchOperand_ParseFail;
4545 }
4546 // Eat the comma
4547 getParser().Lex();
4548
4549 SMLoc E = getLoc();
4550 int SecondReg = tryParseRegister();
4551 if (SecondReg == -1) {
4552 return MatchOperand_ParseFail;
4553 }
4554
4555 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4556 (isXReg && !XRegClass.contains(SecondReg)) ||
4557 (isWReg && !WRegClass.contains(SecondReg))) {
4558 Error(E,"expected second odd register of a "
4559 "consecutive same-size even/odd register pair");
4560 return MatchOperand_ParseFail;
4561 }
4562
4563 unsigned Pair = 0;
4564 if (isXReg) {
4565 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4566 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4567 } else {
4568 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4569 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4570 }
4571
4572 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
4573 getLoc(), getContext()));
4574
4575 return MatchOperand_Success;
4576}
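// Illustrative operands accepted above (e.g. for the CASP family of
// instructions): "x0, x1" or "w4, w5". The first register must have an even
// encoding and the second must be the next consecutive register of the same
// width; the pair is then pushed as the matching X/W sequential-pair
// super-register.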
4577
4578template <bool ParseSuffix>
4579OperandMatchResultTy
4580AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
4581 const SMLoc S = getLoc();
4582 // Check for an SVE vector register specifier first.
4583 int RegNum = -1;
4584 StringRef Kind;
4585
4586 OperandMatchResultTy Res =
4587 tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
4588
4589 if (Res != MatchOperand_Success)
4590 return Res;
4591
4592 if (ParseSuffix && Kind.empty())
4593 return MatchOperand_NoMatch;
4594
4595 unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
4596 .Case("", -1)
4597 .Case(".b", 8)
4598 .Case(".h", 16)
4599 .Case(".s", 32)
4600 .Case(".d", 64)
4601 .Case(".q", 128)
4602 .Default(0);
4603 if (!ElementWidth)
4604 return MatchOperand_NoMatch;
4605
4606 Operands.push_back(
4607 AArch64Operand::CreateReg(RegNum, RegKind::SVEDataVector, ElementWidth,
4608 S, S, getContext()));
4609
4610 return MatchOperand_Success;
4611}
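// Illustrative inputs: "z3.s" produces an SVEDataVector operand with
// ElementWidth == 32; with ParseSuffix == true a bare "z3" is rejected as
// NoMatch, while with ParseSuffix == false it is accepted using the unsized
// "" -> -1 mapping above.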

/build/llvm-toolchain-snapshot-6.0~svn321108/include/llvm/MC/MCParser/MCAsmParserExtension.h

1//===- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks --------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
11#define LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
12
13#include "llvm/ADT/STLExtras.h"
14#include "llvm/ADT/StringRef.h"
15#include "llvm/MC/MCParser/MCAsmLexer.h"
16#include "llvm/MC/MCParser/MCAsmParser.h"
17#include "llvm/Support/SMLoc.h"
18
19namespace llvm {
20
21class Twine;
22
23/// \brief Generic interface for extending the MCAsmParser,
24/// which is implemented by target and object file assembly parser
25/// implementations.
26class MCAsmParserExtension {
27 MCAsmParser *Parser;
28
29protected:
30 MCAsmParserExtension();
31
32 // Helper template for implementing static dispatch functions.
33 template<typename T, bool (T::*Handler)(StringRef, SMLoc)>
34 static bool HandleDirective(MCAsmParserExtension *Target,
35 StringRef Directive,
36 SMLoc DirectiveLoc) {
37 T *Obj = static_cast<T*>(Target);
38 return (Obj->*Handler)(Directive, DirectiveLoc);
39 }
40
41 bool BracketExpressionsSupported = false;
42
43public:
44 MCAsmParserExtension(const MCAsmParserExtension &) = delete;
45 MCAsmParserExtension &operator=(const MCAsmParserExtension &) = delete;
46 virtual ~MCAsmParserExtension();
47
48 /// \brief Initialize the extension for parsing using the given \p Parser.
49 /// The extension should use the AsmParser interfaces to register its
50 /// parsing routines.
51 virtual void Initialize(MCAsmParser &Parser);
52
53 /// \name MCAsmParser Proxy Interfaces
54 /// @{
55
56 MCContext &getContext() { return getParser().getContext(); }
57
58 MCAsmLexer &getLexer() { return getParser().getLexer(); }
59 const MCAsmLexer &getLexer() const {
60 return const_cast<MCAsmParserExtension *>(this)->getLexer();
61 }
62
63 MCAsmParser &getParser() { return *Parser; }
64 const MCAsmParser &getParser() const {
65 return const_cast<MCAsmParserExtension*>(this)->getParser();
66 }
67
68 SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
69 MCStreamer &getStreamer() { return getParser().getStreamer(); }
70
71 bool Warning(SMLoc L, const Twine &Msg) {
72 return getParser().Warning(L, Msg);
73 }
74
75 bool Error(SMLoc L, const Twine &Msg, SMRange Range = SMRange()) {
76 return getParser().Error(L, Msg, Range);
77 }
78
79 void Note(SMLoc L, const Twine &Msg) {
80 getParser().Note(L, Msg);
81 }
82
83 bool TokError(const Twine &Msg) {
84 return getParser().TokError(Msg);
140. Calling 'MCAsmParserExtension::getParser'
141. Returning from 'MCAsmParserExtension::getParser'
85 }
86
87 const AsmToken &Lex() { return getParser().Lex(); }
88 const AsmToken &getTok() { return getParser().getTok(); }
89 bool parseToken(AsmToken::TokenKind T,
90 const Twine &Msg = "unexpected token") {
91 return getParser().parseToken(T, Msg);
92 }
93
94 bool parseMany(function_ref<bool()> parseOne, bool hasComma = true) {
95 return getParser().parseMany(parseOne, hasComma);
96 }
97
98 bool parseOptionalToken(AsmToken::TokenKind T) {
99 return getParser().parseOptionalToken(T);
34. Calling 'MCAsmParserExtension::getParser'
35. Returning from 'MCAsmParserExtension::getParser'
62. Calling 'MCAsmParserExtension::getParser'
63. Returning from 'MCAsmParserExtension::getParser'
92. Calling 'MCAsmParserExtension::getParser'
93. Returning from 'MCAsmParserExtension::getParser'
122. Calling 'MCAsmParserExtension::getParser'
123. Returning from 'MCAsmParserExtension::getParser'
100 }
101
102 bool check(bool P, const Twine &Msg) {
103 return getParser().check(P, Msg);
104 }
105
106 bool check(bool P, SMLoc Loc, const Twine &Msg) {
107 return getParser().check(P, Loc, Msg);
108 }
109
110 bool addErrorSuffix(const Twine &Suffix) {
111 return getParser().addErrorSuffix(Suffix);
112 }
113
114 bool HasBracketExpressions() const { return BracketExpressionsSupported; }
115
116 /// @}
117};
118
119} // end namespace llvm
120
121#endif // LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H

/build/llvm-toolchain-snapshot-6.0~svn321108/include/llvm/MC/MCParser/MCAsmLexer.h

1//===- llvm/MC/MCAsmLexer.h - Abstract Asm Lexer Interface ------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_MC_MCPARSER_MCASMLEXER_H
11#define LLVM_MC_MCPARSER_MCASMLEXER_H
12
13#include "llvm/ADT/APInt.h"
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/StringRef.h"
17#include "llvm/Support/SMLoc.h"
18#include <algorithm>
19#include <cassert>
20#include <cstddef>
21#include <cstdint>
22#include <string>
23
24namespace llvm {
25
26/// Target independent representation for an assembler token.
27class AsmToken {
28public:
29 enum TokenKind {
30 // Markers
31 Eof, Error,
32
33 // String values.
34 Identifier,
35 String,
36
37 // Integer values.
38 Integer,
39 BigNum, // larger than 64 bits
40
41 // Real values.
42 Real,
43
44 // Comments
45 Comment,
46 HashDirective,
47 // No-value.
48 EndOfStatement,
49 Colon,
50 Space,
51 Plus, Minus, Tilde,
52 Slash, // '/'
53 BackSlash, // '\'
54 LParen, RParen, LBrac, RBrac, LCurly, RCurly,
55 Star, Dot, Comma, Dollar, Equal, EqualEqual,
56
57 Pipe, PipePipe, Caret,
58 Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
59 Less, LessEqual, LessLess, LessGreater,
60 Greater, GreaterEqual, GreaterGreater, At,
61
62 // MIPS unary expression operators such as %neg.
63 PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
64 PercentDtprel_Lo, PercentGot, PercentGot_Disp, PercentGot_Hi, PercentGot_Lo,
65 PercentGot_Ofst, PercentGot_Page, PercentGottprel, PercentGp_Rel, PercentHi,
66 PercentHigher, PercentHighest, PercentLo, PercentNeg, PercentPcrel_Hi,
67 PercentPcrel_Lo, PercentTlsgd, PercentTlsldm, PercentTprel_Hi,
68 PercentTprel_Lo
69 };
70
71private:
72 TokenKind Kind;
73
74 /// A reference to the entire token contents; this is always a pointer into
75 /// a memory buffer owned by the source manager.
76 StringRef Str;
77
78 APInt IntVal;
79
80public:
81 AsmToken() = default;
82 AsmToken(TokenKind Kind, StringRef Str, APInt IntVal)
83 : Kind(Kind), Str(Str), IntVal(std::move(IntVal)) {}
84 AsmToken(TokenKind Kind, StringRef Str, int64_t IntVal = 0)
85 : Kind(Kind), Str(Str), IntVal(64, IntVal, true) {}
86
87 TokenKind getKind() const { return Kind; }
88 bool is(TokenKind K) const { return Kind == K; }
89 bool isNot(TokenKind K) const { return Kind != K; }
128. Assuming the condition is true
90
91 SMLoc getLoc() const;
92 SMLoc getEndLoc() const;
93 SMRange getLocRange() const;
94
95 /// Get the contents of a string token (without quotes).
96 StringRef getStringContents() const {
97 assert(Kind == String && "This token isn't a string!");
98 return Str.slice(1, Str.size() - 1);
99 }
100
101 /// Get the identifier string for the current token, which should be an
102 /// identifier or a string. This gets the portion of the string which should
103 /// be used as the identifier, e.g., it does not include the quotes on
104 /// strings.
105 StringRef getIdentifier() const {
106 if (Kind == Identifier)
107 return getString();
108 return getStringContents();
109 }
110
111 /// Get the string for the current token, this includes all characters (for
112 /// example, the quotes on strings) in the token.
113 ///
114 /// The returned StringRef points into the source manager's memory buffer, and
115 /// is safe to store across calls to Lex().
116 StringRef getString() const { return Str; }
117
118 // FIXME: Don't compute this in advance, it makes every token larger, and is
119 // also not generally what we want (it is nicer for recovery etc. to lex 123br
120 // as a single token, then diagnose as an invalid number).
121 int64_t getIntVal() const {
122 assert(Kind == Integer && "This token isn't an integer!");
123 return IntVal.getZExtValue();
124 }
125
126 APInt getAPIntVal() const {
127 assert((Kind == Integer || Kind == BigNum) &&
128        "This token isn't an integer!");
129 return IntVal;
130 }
131};
132
133/// A callback class which is notified of each comment in an assembly file as
134/// it is lexed.
135class AsmCommentConsumer {
136public:
137 virtual ~AsmCommentConsumer() = default;
138
139 /// Callback function for when a comment is lexed. Loc is the start of the
140 /// comment text (excluding the comment-start marker). CommentText is the text
141 /// of the comment, excluding the comment start and end markers, and the
142 /// newline for single-line comments.
143 virtual void HandleComment(SMLoc Loc, StringRef CommentText) = 0;
144};
145
146
147/// Generic assembler lexer interface, for use by target specific assembly
148/// lexers.
149class MCAsmLexer {
150 /// The current token, stored in the base class for faster access.
151 SmallVector<AsmToken, 1> CurTok;
152
153 /// The location and description of the current error
154 SMLoc ErrLoc;
155 std::string Err;
156
157protected: // Can only create subclasses.
158 const char *TokStart = nullptr;
159 bool SkipSpace = true;
160 bool AllowAtInIdentifier;
161 bool IsAtStartOfStatement = true;
162 AsmCommentConsumer *CommentConsumer = nullptr;
163
164 bool AltMacroMode;
165 MCAsmLexer();
166
167 virtual AsmToken LexToken() = 0;
168
169 void SetError(SMLoc errLoc, const std::string &err) {
170 ErrLoc = errLoc;
171 Err = err;
172 }
173
174public:
175 MCAsmLexer(const MCAsmLexer &) = delete;
176 MCAsmLexer &operator=(const MCAsmLexer &) = delete;
177 virtual ~MCAsmLexer();
178
179 bool IsaAltMacroMode() {
180 return AltMacroMode;
181 }
182
183 void SetAltMacroMode(bool AltMacroSet) {
184 AltMacroMode = AltMacroSet;
185 }
186
187 /// Consume the next token from the input stream and return it.
188 ///
189 /// The lexer will continuously return the end-of-file token once the end of
190 /// the main input file has been reached.
191 const AsmToken &Lex() {
192 assert(!CurTok.empty());
193 // Mark whether we are parsing out an EndOfStatement.
194 IsAtStartOfStatement = CurTok.front().getKind() == AsmToken::EndOfStatement;
195 CurTok.erase(CurTok.begin());
196 // LexToken may generate multiple tokens via UnLex but will always return
197 // the first one. Place returned value at head of CurTok vector.
198 if (CurTok.empty()) {
199 AsmToken T = LexToken();
200 CurTok.insert(CurTok.begin(), T);
201 }
202 return CurTok.front();
203 }
204
205 void UnLex(AsmToken const &Token) {
206 IsAtStartOfStatement = false;
207 CurTok.insert(CurTok.begin(), Token);
208 }
209
210 bool isAtStartOfStatement() { return IsAtStartOfStatement; }
211
212 virtual StringRef LexUntilEndOfStatement() = 0;
213
214 /// Get the current source location.
215 SMLoc getLoc() const;
216
217 /// Get the current (last) lexed token.
218 const AsmToken &getTok() const {
219 return CurTok[0];
220 }
221
222 /// Look ahead at the next token to be lexed.
223 const AsmToken peekTok(bool ShouldSkipSpace = true) {
224 AsmToken Tok;
225
226 MutableArrayRef<AsmToken> Buf(Tok);
227 size_t ReadCount = peekTokens(Buf, ShouldSkipSpace);
228
229 assert(ReadCount == 1);
230 (void)ReadCount;
231
232 return Tok;
233 }
234
235 /// Look ahead an arbitrary number of tokens.
236 virtual size_t peekTokens(MutableArrayRef<AsmToken> Buf,
237 bool ShouldSkipSpace = true) = 0;
238
239 /// Get the current error location
240 SMLoc getErrLoc() {
241 return ErrLoc;
242 }
243
244 /// Get the current error string
245 const std::string &getErr() {
246 return Err;
247 }
248
249 /// Get the kind of current token.
250 AsmToken::TokenKind getKind() const { return getTok().getKind(); }
251
252 /// Check if the current token has kind \p K.
253 bool is(AsmToken::TokenKind K) const { return getTok().is(K); }
254
255 /// Check if the current token has kind \p K.
256 bool isNot(AsmToken::TokenKind K) const { return getTok().isNot(K); }
257
258 /// Set whether spaces should be ignored by the lexer
259 void setSkipSpace(bool val) { SkipSpace = val; }
260
261 bool getAllowAtInIdentifier() { return AllowAtInIdentifier; }
262 void setAllowAtInIdentifier(bool v) { AllowAtInIdentifier = v; }
263
264 void setCommentConsumer(AsmCommentConsumer *CommentConsumer) {
265 this->CommentConsumer = CommentConsumer;
266 }
267};
268
269} // end namespace llvm
270
271#endif // LLVM_MC_MCPARSER_MCASMLEXER_H

/build/llvm-toolchain-snapshot-6.0~svn321108/include/llvm/ADT/Twine.h

1//===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_ADT_TWINE_H
11#define LLVM_ADT_TWINE_H
12
13#include "llvm/ADT/SmallVector.h"
14#include "llvm/ADT/StringRef.h"
15#include "llvm/Support/ErrorHandling.h"
16#include <cassert>
17#include <cstdint>
18#include <string>
19
20namespace llvm {
21
22 class formatv_object_base;
23 class raw_ostream;
24
25 /// Twine - A lightweight data structure for efficiently representing the
26 /// concatenation of temporary values as strings.
27 ///
28 /// A Twine is a kind of rope, it represents a concatenated string using a
29 /// binary-tree, where the string is the preorder of the nodes. Since the
30 /// Twine can be efficiently rendered into a buffer when its result is used,
31 /// it avoids the cost of generating temporary values for intermediate string
32 /// results -- particularly in cases when the Twine result is never
33 /// required. By explicitly tracking the type of leaf nodes, we can also avoid
34 /// the creation of temporary strings for conversion operations (such as
35 /// appending an integer to a string).
36 ///
37 /// A Twine is not intended for direct use and should not be stored; its
38 /// implementation relies on the ability to store pointers to temporary stack
39 /// objects which may be deallocated at the end of a statement. Twines should
40 /// only be accepted as const reference arguments, when an API wishes
41 /// to accept possibly-concatenated strings.
42 ///
43 /// Twines support a special 'null' value, which always concatenates to form
44 /// itself, and renders as an empty string. This can be returned from APIs to
45 /// effectively nullify any concatenations performed on the result.
46 ///
47 /// \b Implementation
48 ///
49 /// Given the nature of a Twine, it is not possible for the Twine's
50 /// concatenation method to construct interior nodes; the result must be
51 /// represented inside the returned value. For this reason a Twine object
52 /// actually holds two values, the left- and right-hand sides of a
53 /// concatenation. We also have nullary Twine objects, which are effectively
54 /// sentinel values that represent empty strings.
55 ///
56 /// Thus, a Twine can effectively have zero, one, or two children. The \see
57 /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
58 /// testing the number of children.
59 ///
60 /// We maintain a number of invariants on Twine objects (FIXME: Why):
61 /// - Nullary twines are always represented with their Kind on the left-hand
62 /// side, and the Empty kind on the right-hand side.
63 /// - Unary twines are always represented with the value on the left-hand
64 /// side, and the Empty kind on the right-hand side.
65 /// - If a Twine has another Twine as a child, that child should always be
66 /// binary (otherwise it could have been folded into the parent).
67 ///
68 /// These invariants are checked by \see isValid().
69 ///
70 /// \b Efficiency Considerations
71 ///
72 /// The Twine is designed to yield efficient and small code for common
73 /// situations. For this reason, the concat() method is inlined so that
74 /// concatenations of leaf nodes can be optimized into stores directly into a
75 /// single stack allocated object.
76 ///
77 /// In practice, not all compilers can be trusted to optimize concat() fully,
78 /// so we provide two additional methods (and accompanying operator+
79 /// overloads) to guarantee that particularly important cases (cstring plus
80 /// StringRef) codegen as desired.
81 class Twine {
82 /// NodeKind - Represent the type of an argument.
83 enum NodeKind : unsigned char {
84 /// An empty string; the result of concatenating anything with it is also
85 /// empty.
86 NullKind,
87
88 /// The empty string.
89 EmptyKind,
90
91 /// A pointer to a Twine instance.
92 TwineKind,
93
94 /// A pointer to a C string instance.
95 CStringKind,
96
97 /// A pointer to an std::string instance.
98 StdStringKind,
99
100 /// A pointer to a StringRef instance.
101 StringRefKind,
102
103 /// A pointer to a SmallString instance.
104 SmallStringKind,
105
106 /// A pointer to a formatv_object_base instance.
107 FormatvObjectKind,
108
109 /// A char value, to render as a character.
110 CharKind,
111
112 /// An unsigned int value, to render as an unsigned decimal integer.
113 DecUIKind,
114
115 /// An int value, to render as a signed decimal integer.
116 DecIKind,
117
118 /// A pointer to an unsigned long value, to render as an unsigned decimal
119 /// integer.
120 DecULKind,
121
122 /// A pointer to a long value, to render as a signed decimal integer.
123 DecLKind,
124
125 /// A pointer to an unsigned long long value, to render as an unsigned
126 /// decimal integer.
127 DecULLKind,
128
129 /// A pointer to a long long value, to render as a signed decimal integer.
130 DecLLKind,
131
132 /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
133 /// integer.
134 UHexKind
135 };
136
137 union Child
138 {
139 const Twine *twine;
140 const char *cString;
141 const std::string *stdString;
142 const StringRef *stringRef;
143 const SmallVectorImpl<char> *smallString;
144 const formatv_object_base *formatvObject;
145 char character;
146 unsigned int decUI;
147 int decI;
148 const unsigned long *decUL;
149 const long *decL;
150 const unsigned long long *decULL;
151 const long long *decLL;
152 const uint64_t *uHex;
153 };
154
155 /// LHS - The prefix in the concatenation, which may be uninitialized for
156 /// Null or Empty kinds.
157 Child LHS;
158
159 /// RHS - The suffix in the concatenation, which may be uninitialized for
160 /// Null or Empty kinds.
161 Child RHS;
162
163 /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
164 NodeKind LHSKind = EmptyKind;
165
166 /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
167 NodeKind RHSKind = EmptyKind;
168
169 /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
170 explicit Twine(NodeKind Kind) : LHSKind(Kind) {
171 assert(isNullary() && "Invalid kind!");
172 }
173
174 /// Construct a binary twine.
175 explicit Twine(const Twine &LHS, const Twine &RHS)
176 : LHSKind(TwineKind), RHSKind(TwineKind) {
177 this->LHS.twine = &LHS;
178 this->RHS.twine = &RHS;
179 assert(isValid() && "Invalid twine!");
180 }
181
182 /// Construct a twine from explicit values.
183 explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
184 : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
185 assert(isValid() && "Invalid twine!");
186 }
187
188 /// Check for the null twine.
189 bool isNull() const {
190 return getLHSKind() == NullKind;
191 }
192
193 /// Check for the empty twine.
194 bool isEmpty() const {
195 return getLHSKind() == EmptyKind;
196 }
197
198 /// Check if this is a nullary twine (null or empty).
199 bool isNullary() const {
200 return isNull() || isEmpty();
201 }
202
203 /// Check if this is a unary twine.
204 bool isUnary() const {
205 return getRHSKind() == EmptyKind && !isNullary();
206 }
207
208 /// Check if this is a binary twine.
209 bool isBinary() const {
210 return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
211 }
212
213 /// Check if this is a valid twine (satisfying the invariants on
214 /// order and number of arguments).
215 bool isValid() const {
216 // Nullary twines always have Empty on the RHS.
217 if (isNullary() && getRHSKind() != EmptyKind)
218 return false;
219
220 // Null should never appear on the RHS.
221 if (getRHSKind() == NullKind)
222 return false;
223
224 // The RHS cannot be non-empty if the LHS is empty.
225 if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
226 return false;
227
228 // A twine child should always be binary.
229 if (getLHSKind() == TwineKind &&
230 !LHS.twine->isBinary())
231 return false;
232 if (getRHSKind() == TwineKind &&
233 !RHS.twine->isBinary())
234 return false;
235
236 return true;
237 }
238
239 /// Get the NodeKind of the left-hand side.
240 NodeKind getLHSKind() const { return LHSKind; }
241
242 /// Get the NodeKind of the right-hand side.
243 NodeKind getRHSKind() const { return RHSKind; }
244
245 /// Print one child from a twine.
246 void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
247
248 /// Print the representation of one child from a twine.
249 void printOneChildRepr(raw_ostream &OS, Child Ptr,
250 NodeKind Kind) const;
251
252 public:
253 /// @name Constructors
254 /// @{
255
256 /// Construct from an empty string.
257 /*implicit*/ Twine() {
258 assert(isValid() && "Invalid twine!");
259 }
260
261 Twine(const Twine &) = default;
262
263 /// Construct from a C string.
264 ///
265 /// We take care here to optimize "" into the empty twine -- this will be
266 /// optimized out for string constants. This allows Twine arguments to have
267 /// default "" values, without introducing unnecessary string constants.
268 /*implicit*/ Twine(const char *Str) {
132. Calling implicit default constructor for 'Child'
133. Returning from default constructor for 'Child'
134. Calling implicit default constructor for 'Child'
135. Returning from default constructor for 'Child'
269 if (Str[0] != '\0') {
136. Taking true branch
270 LHS.cString = Str;
271 LHSKind = CStringKind;
272 } else
273 LHSKind = EmptyKind;
274
275 assert(isValid() && "Invalid twine!");
137. Within the expansion of the macro 'assert':
  a. Assuming the condition is true
276 }
277
278 /// Construct from an std::string.
279 /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) {
280 LHS.stdString = &Str;
281 assert(isValid() && "Invalid twine!");
282 }
283
284 /// Construct from a StringRef.
285 /*implicit*/ Twine(const StringRef &Str) : LHSKind(StringRefKind) {
286 LHS.stringRef = &Str;
287 assert(isValid() && "Invalid twine!");
288 }
289
290 /// Construct from a SmallString.
291 /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
292 : LHSKind(SmallStringKind) {
293 LHS.smallString = &Str;
294 assert(isValid() && "Invalid twine!");
295 }
296
297 /// Construct from a formatv_object_base.
298 /*implicit*/ Twine(const formatv_object_base &Fmt)
299 : LHSKind(FormatvObjectKind) {
300 LHS.formatvObject = &Fmt;
301 assert(isValid() && "Invalid twine!");
302 }
303
304 /// Construct from a char.
305 explicit Twine(char Val) : LHSKind(CharKind) {
306 LHS.character = Val;
307 }
308
309 /// Construct from a signed char.
310 explicit Twine(signed char Val) : LHSKind(CharKind) {
311 LHS.character = static_cast<char>(Val);
312 }
313
314 /// Construct from an unsigned char.
315 explicit Twine(unsigned char Val) : LHSKind(CharKind) {
316 LHS.character = static_cast<char>(Val);
317 }
318
319 /// Construct a twine to print \p Val as an unsigned decimal integer.
320 explicit Twine(unsigned Val) : LHSKind(DecUIKind) {
321 LHS.decUI = Val;
322 }
323
324 /// Construct a twine to print \p Val as a signed decimal integer.
325 explicit Twine(int Val) : LHSKind(DecIKind) {
326 LHS.decI = Val;
327 }
328
329 /// Construct a twine to print \p Val as an unsigned decimal integer.
330 explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) {
331 LHS.decUL = &Val;
332 }
333
334 /// Construct a twine to print \p Val as a signed decimal integer.
335 explicit Twine(const long &Val) : LHSKind(DecLKind) {
336 LHS.decL = &Val;
337 }
338
339 /// Construct a twine to print \p Val as an unsigned decimal integer.
340 explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) {
341 LHS.decULL = &Val;
342 }
343
344 /// Construct a twine to print \p Val as a signed decimal integer.
345 explicit Twine(const long long &Val) : LHSKind(DecLLKind) {
346 LHS.decLL = &Val;
347 }
348
349 // FIXME: Unfortunately, to make sure this is as efficient as possible we
350 // need extra binary constructors from particular types. We can't rely on
351 // the compiler to be smart enough to fold operator+()/concat() down to the
352 // right thing. Yet.
353
354 /// Construct as the concatenation of a C string and a StringRef.
355 /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
356 : LHSKind(CStringKind), RHSKind(StringRefKind) {
357 this->LHS.cString = LHS;
358 this->RHS.stringRef = &RHS;
359 assert(isValid() && "Invalid twine!");
360 }
361
362 /// Construct as the concatenation of a StringRef and a C string.
363 /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
364 : LHSKind(StringRefKind), RHSKind(CStringKind) {
365 this->LHS.stringRef = &LHS;
366 this->RHS.cString = RHS;
367 assert(isValid() && "Invalid twine!");
368 }
369
370 /// Since the intended use of twines is as temporary objects, assignments
371 /// when concatenating might cause undefined behavior or stack corruption.
372 Twine &operator=(const Twine &) = delete;
373
374 /// Create a 'null' string, which is an empty string that always
375 /// concatenates to form another empty string.
376 static Twine createNull() {
377 return Twine(NullKind);
378 }
379
380 /// @}
381 /// @name Numeric Conversions
382 /// @{
383
384 // Construct a twine to print \p Val as an unsigned hexadecimal integer.
385 static Twine utohexstr(const uint64_t &Val) {
386 Child LHS, RHS;
387 LHS.uHex = &Val;
388 RHS.twine = nullptr;
389 return Twine(LHS, UHexKind, RHS, EmptyKind);
390 }
391
392 /// @}
393 /// @name Predicate Operations
394 /// @{
395
396 /// Check if this twine is trivially empty; a false return value does not
397 /// necessarily mean the twine is empty.
398 bool isTriviallyEmpty() const {
399 return isNullary();
400 }
401
402 /// Return true if this twine can be dynamically accessed as a single
403 /// StringRef value with getSingleStringRef().
404 bool isSingleStringRef() const {
405 if (getRHSKind() != EmptyKind) return false;
406
407 switch (getLHSKind()) {
408 case EmptyKind:
409 case CStringKind:
410 case StdStringKind:
411 case StringRefKind:
412 case SmallStringKind:
413 return true;
414 default:
415 return false;
416 }
417 }
418
419 /// @}
420 /// @name String Operations
421 /// @{
422
423 Twine concat(const Twine &Suffix) const;
424
425 /// @}
426 /// @name Output & Conversion.
427 /// @{
428
429 /// Return the twine contents as a std::string.
430 std::string str() const;
431
432 /// Append the concatenated string into the given SmallString or SmallVector.
433 void toVector(SmallVectorImpl<char> &Out) const;
434
435 /// This returns the twine as a single StringRef. This method is only valid
436 /// if isSingleStringRef() is true.
437 StringRef getSingleStringRef() const {
438 assert(isSingleStringRef() && "This cannot be had as a single stringref!");
439 switch (getLHSKind()) {
440 default: llvm_unreachable("Out of sync with isSingleStringRef");
441 case EmptyKind: return StringRef();
442 case CStringKind: return StringRef(LHS.cString);
443 case StdStringKind: return StringRef(*LHS.stdString);
444 case StringRefKind: return *LHS.stringRef;
445 case SmallStringKind:
446 return StringRef(LHS.smallString->data(), LHS.smallString->size());
447 }
448 }
449
450 /// This returns the twine as a single StringRef if it can be
451 /// represented as such. Otherwise the twine is written into the given
452 /// SmallVector and a StringRef to the SmallVector's data is returned.
453 StringRef toStringRef(SmallVectorImpl<char> &Out) const {
454 if (isSingleStringRef())
455 return getSingleStringRef();
456 toVector(Out);
457 return StringRef(Out.data(), Out.size());
458 }
459
460 /// This returns the twine as a single null terminated StringRef if it
461 /// can be represented as such. Otherwise the twine is written into the
462 /// given SmallVector and a StringRef to the SmallVector's data is returned.
463 ///
464 /// The returned StringRef's size does not include the null terminator.
465 StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
466
467 /// Write the concatenated string represented by this twine to the
468 /// stream \p OS.
469 void print(raw_ostream &OS) const;
470
471 /// Dump the concatenated string represented by this twine to stderr.
472 void dump() const;
473
474 /// Write the representation of this twine to the stream \p OS.
475 void printRepr(raw_ostream &OS) const;
476
477 /// Dump the representation of this twine to stderr.
478 void dumpRepr() const;
479
480 /// @}
481 };
482
483 /// @name Twine Inline Implementations
484 /// @{
485
486 inline Twine Twine::concat(const Twine &Suffix) const {
487 // Concatenation with null is null.
488 if (isNull() || Suffix.isNull())
489 return Twine(NullKind);
490
491 // Concatenation with empty yields the other side.
492 if (isEmpty())
493 return Suffix;
494 if (Suffix.isEmpty())
495 return *this;
496
497 // Otherwise we need to create a new node, taking care to fold in unary
498 // twines.
499 Child NewLHS, NewRHS;
500 NewLHS.twine = this;
501 NewRHS.twine = &Suffix;
502 NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
503 if (isUnary()) {
504 NewLHS = LHS;
505 NewLHSKind = getLHSKind();
506 }
507 if (Suffix.isUnary()) {
508 NewRHS = Suffix.LHS;
509 NewRHSKind = Suffix.getLHSKind();
510 }
511
512 return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
513 }
514
515 inline Twine operator+(const Twine &LHS, const Twine &RHS) {
516 return LHS.concat(RHS);
517 }
518
519 /// Additional overload to guarantee simplified codegen; this is equivalent to
520 /// concat().
521
522 inline Twine operator+(const char *LHS, const StringRef &RHS) {
523 return Twine(LHS, RHS);
524 }
525
526 /// Additional overload to guarantee simplified codegen; this is equivalent to
527 /// concat().
528
529 inline Twine operator+(const StringRef &LHS, const char *RHS) {
530 return Twine(LHS, RHS);
531 }
532
533 inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
534 RHS.print(OS);
535 return OS;
536 }
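// A minimal usage sketch (illustrative only, not part of this header): a
// Twine is built implicitly from the pieces of a concatenation and consumed
// within the same full-expression, e.g.
//   void reportUnknownReg(MCAsmParser &P, SMLoc L, StringRef Name) {
//     P.Warning(L, "unknown register '" + Name + "'");  // char* + StringRef
//   }
// Storing the temporary Twine beyond that expression would leave it dangling.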
537
538 /// @}
539
540} // end namespace llvm
541
542#endif // LLVM_ADT_TWINE_H