1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "Utils/AArch64BaseInfo.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCLinkerOptimizationHint.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetOptions.h"
40 #include "llvm/MC/SubtargetFeature.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/Compiler.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/MathExtras.h"
45 #include "llvm/Support/SMLoc.h"
46 #include "llvm/Support/TargetParser.h"
47 #include "llvm/Support/TargetRegistry.h"
48 #include "llvm/Support/raw_ostream.h"
49 #include <cassert>
50 #include <cctype>
51 #include <cstdint>
52 #include <cstdio>
53 #include <string>
54 #include <tuple>
55 #include <utility>
56 #include <vector>
57 
58 using namespace llvm;
59 
60 namespace {
61 
62 enum class RegKind {Scalar, NeonVector, SVEDataVector};
63 
64 class AArch64AsmParser : public MCTargetAsmParser {
65 private:
66  StringRef Mnemonic; ///< Instruction mnemonic.
67 
68  // Map of register aliases created via the .req directive.
69  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
70 
71  AArch64TargetStreamer &getTargetStreamer() {
72  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
73  return static_cast<AArch64TargetStreamer &>(TS);
74  }
75 
76  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
77 
78  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
79  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
80  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
81  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
82  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
83  int tryParseRegister();
84  int tryMatchVectorRegister(StringRef &Kind, bool expected);
85  bool parseRegister(OperandVector &Operands);
86  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
87  bool parseVectorList(OperandVector &Operands);
88  bool parseOperand(OperandVector &Operands, bool isCondCode,
89  bool invertCondCode);
90 
91  bool showMatchError(SMLoc Loc, unsigned ErrCode, OperandVector &Operands);
92 
93  bool parseDirectiveArch(SMLoc L);
94  bool parseDirectiveCPU(SMLoc L);
95  bool parseDirectiveWord(unsigned Size, SMLoc L);
96  bool parseDirectiveInst(SMLoc L);
97 
98  bool parseDirectiveTLSDescCall(SMLoc L);
99 
100  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
101  bool parseDirectiveLtorg(SMLoc L);
102 
103  bool parseDirectiveReq(StringRef Name, SMLoc L);
104  bool parseDirectiveUnreq(SMLoc L);
105 
106  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
107  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
108  OperandVector &Operands, MCStreamer &Out,
109  uint64_t &ErrorInfo,
110  bool MatchingInlineAsm) override;
111 /// @name Auto-generated Match Functions
112 /// {
113 
114 #define GET_ASSEMBLER_HEADER
115 #include "AArch64GenAsmMatcher.inc"
116 
117  /// }
118 
119  OperandMatchResultTy tryParseSVERegister(int &Reg, StringRef &Kind,
120  RegKind MatchKind);
121  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
122  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
123  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
124  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
125  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
126  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
127  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
128  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
129  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
130  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
131  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
132  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
133  bool tryParseNeonVectorRegister(OperandVector &Operands);
134  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
135  template <bool ParseSuffix>
136  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
137 
138 public:
139  enum AArch64MatchResultTy {
140  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
141 #define GET_OPERAND_DIAGNOSTIC_TYPES
142 #include "AArch64GenAsmMatcher.inc"
143  };
144  bool IsILP32;
145 
146  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
147  const MCInstrInfo &MII, const MCTargetOptions &Options)
148  : MCTargetAsmParser(Options, STI, MII) {
149  IsILP32 = Options.getABIName() == "ilp32";
150  MCAsmParserExtension::Initialize(Parser);
151  MCStreamer &S = getParser().getStreamer();
152  if (S.getTargetStreamer() == nullptr)
153  new AArch64TargetStreamer(S);
154 
155  // Initialize the set of available features.
156  setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
157  }
158 
159  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
160  SMLoc NameLoc, OperandVector &Operands) override;
161  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
162  bool ParseDirective(AsmToken DirectiveID) override;
163  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
164  unsigned Kind) override;
165 
166  static bool classifySymbolRef(const MCExpr *Expr,
167  AArch64MCExpr::VariantKind &ELFRefKind,
168  MCSymbolRefExpr::VariantKind &DarwinRefKind,
169  int64_t &Addend);
170 };
171 
172 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
173 /// instruction.
174 class AArch64Operand : public MCParsedAsmOperand {
175 private:
176  enum KindTy {
177  k_Immediate,
178  k_ShiftedImm,
179  k_CondCode,
180  k_Register,
181  k_VectorList,
182  k_VectorIndex,
183  k_Token,
184  k_SysReg,
185  k_SysCR,
186  k_Prefetch,
187  k_ShiftExtend,
188  k_FPImm,
189  k_Barrier,
190  k_PSBHint,
191  } Kind;
192 
193  SMLoc StartLoc, EndLoc;
194 
195  struct TokOp {
196  const char *Data;
197  unsigned Length;
198  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
199  };
200 
201  struct RegOp {
202  unsigned RegNum;
203  RegKind Kind;
204 
205  int ElementWidth;
206  };
207 
208  struct VectorListOp {
209  unsigned RegNum;
210  unsigned Count;
211  unsigned NumElements;
212  unsigned ElementKind;
213  };
214 
215  struct VectorIndexOp {
216  unsigned Val;
217  };
218 
219  struct ImmOp {
220  const MCExpr *Val;
221  };
222 
223  struct ShiftedImmOp {
224  const MCExpr *Val;
225  unsigned ShiftAmount;
226  };
227 
228  struct CondCodeOp {
229  AArch64CC::CondCode Code;
230  };
231 
232  struct FPImmOp {
233  unsigned Val; // Encoded 8-bit representation.
234  };
235 
236  struct BarrierOp {
237  const char *Data;
238  unsigned Length;
239  unsigned Val; // Not the enum since not all values have names.
240  };
241 
242  struct SysRegOp {
243  const char *Data;
244  unsigned Length;
245  uint32_t MRSReg;
246  uint32_t MSRReg;
247  uint32_t PStateField;
248  };
249 
250  struct SysCRImmOp {
251  unsigned Val;
252  };
253 
254  struct PrefetchOp {
255  const char *Data;
256  unsigned Length;
257  unsigned Val;
258  };
259 
260  struct PSBHintOp {
261  const char *Data;
262  unsigned Length;
263  unsigned Val;
264  };
265 
266  struct ShiftExtendOp {
267  AArch64_AM::ShiftExtendType Type;
268  unsigned Amount;
269  bool HasExplicitAmount;
270  };
271 
272  struct ExtendOp {
273  unsigned Val;
274  };
275 
276  union {
277  struct TokOp Tok;
278  struct RegOp Reg;
279  struct VectorListOp VectorList;
280  struct VectorIndexOp VectorIndex;
281  struct ImmOp Imm;
282  struct ShiftedImmOp ShiftedImm;
283  struct CondCodeOp CondCode;
284  struct FPImmOp FPImm;
285  struct BarrierOp Barrier;
286  struct SysRegOp SysReg;
287  struct SysCRImmOp SysCRImm;
288  struct PrefetchOp Prefetch;
289  struct PSBHintOp PSBHint;
290  struct ShiftExtendOp ShiftExtend;
291  };
292 
293  // Keep the MCContext around as the MCExprs may need to be manipulated during
294  // the add<>Operands() calls.
295  MCContext &Ctx;
296 
297 public:
298  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
299 
300  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
301  Kind = o.Kind;
302  StartLoc = o.StartLoc;
303  EndLoc = o.EndLoc;
304  switch (Kind) {
305  case k_Token:
306  Tok = o.Tok;
307  break;
308  case k_Immediate:
309  Imm = o.Imm;
310  break;
311  case k_ShiftedImm:
312  ShiftedImm = o.ShiftedImm;
313  break;
314  case k_CondCode:
315  CondCode = o.CondCode;
316  break;
317  case k_FPImm:
318  FPImm = o.FPImm;
319  break;
320  case k_Barrier:
321  Barrier = o.Barrier;
322  break;
323  case k_Register:
324  Reg = o.Reg;
325  break;
326  case k_VectorList:
327  VectorList = o.VectorList;
328  break;
329  case k_VectorIndex:
330  VectorIndex = o.VectorIndex;
331  break;
332  case k_SysReg:
333  SysReg = o.SysReg;
334  break;
335  case k_SysCR:
336  SysCRImm = o.SysCRImm;
337  break;
338  case k_Prefetch:
339  Prefetch = o.Prefetch;
340  break;
341  case k_PSBHint:
342  PSBHint = o.PSBHint;
343  break;
344  case k_ShiftExtend:
345  ShiftExtend = o.ShiftExtend;
346  break;
347  }
348  }
349 
350  /// getStartLoc - Get the location of the first token of this operand.
351  SMLoc getStartLoc() const override { return StartLoc; }
352  /// getEndLoc - Get the location of the last token of this operand.
353  SMLoc getEndLoc() const override { return EndLoc; }
354 
355  StringRef getToken() const {
356  assert(Kind == k_Token && "Invalid access!");
357  return StringRef(Tok.Data, Tok.Length);
358  }
359 
360  bool isTokenSuffix() const {
361  assert(Kind == k_Token && "Invalid access!");
362  return Tok.IsSuffix;
363  }
364 
365  const MCExpr *getImm() const {
366  assert(Kind == k_Immediate && "Invalid access!");
367  return Imm.Val;
368  }
369 
370  const MCExpr *getShiftedImmVal() const {
371  assert(Kind == k_ShiftedImm && "Invalid access!");
372  return ShiftedImm.Val;
373  }
374 
375  unsigned getShiftedImmShift() const {
376  assert(Kind == k_ShiftedImm && "Invalid access!");
377  return ShiftedImm.ShiftAmount;
378  }
379 
380  AArch64CC::CondCode getCondCode() const {
381  assert(Kind == k_CondCode && "Invalid access!");
382  return CondCode.Code;
383  }
384 
385  unsigned getFPImm() const {
386  assert(Kind == k_FPImm && "Invalid access!");
387  return FPImm.Val;
388  }
389 
390  unsigned getBarrier() const {
391  assert(Kind == k_Barrier && "Invalid access!");
392  return Barrier.Val;
393  }
394 
395  StringRef getBarrierName() const {
396  assert(Kind == k_Barrier && "Invalid access!");
397  return StringRef(Barrier.Data, Barrier.Length);
398  }
399 
400  unsigned getReg() const override {
401  assert(Kind == k_Register && "Invalid access!");
402  return Reg.RegNum;
403  }
404 
405  unsigned getVectorListStart() const {
406  assert(Kind == k_VectorList && "Invalid access!");
407  return VectorList.RegNum;
408  }
409 
410  unsigned getVectorListCount() const {
411  assert(Kind == k_VectorList && "Invalid access!");
412  return VectorList.Count;
413  }
414 
415  unsigned getVectorIndex() const {
416  assert(Kind == k_VectorIndex && "Invalid access!");
417  return VectorIndex.Val;
418  }
419 
420  StringRef getSysReg() const {
421  assert(Kind == k_SysReg && "Invalid access!");
422  return StringRef(SysReg.Data, SysReg.Length);
423  }
424 
425  unsigned getSysCR() const {
426  assert(Kind == k_SysCR && "Invalid access!");
427  return SysCRImm.Val;
428  }
429 
430  unsigned getPrefetch() const {
431  assert(Kind == k_Prefetch && "Invalid access!");
432  return Prefetch.Val;
433  }
434 
435  unsigned getPSBHint() const {
436  assert(Kind == k_PSBHint && "Invalid access!");
437  return PSBHint.Val;
438  }
439 
440  StringRef getPSBHintName() const {
441  assert(Kind == k_PSBHint && "Invalid access!");
442  return StringRef(PSBHint.Data, PSBHint.Length);
443  }
444 
445  StringRef getPrefetchName() const {
446  assert(Kind == k_Prefetch && "Invalid access!");
447  return StringRef(Prefetch.Data, Prefetch.Length);
448  }
449 
450  AArch64_AM::ShiftExtendType getShiftExtendType() const {
451  assert(Kind == k_ShiftExtend && "Invalid access!");
452  return ShiftExtend.Type;
453  }
454 
455  unsigned getShiftExtendAmount() const {
456  assert(Kind == k_ShiftExtend && "Invalid access!");
457  return ShiftExtend.Amount;
458  }
459 
460  bool hasShiftExtendAmount() const {
461  assert(Kind == k_ShiftExtend && "Invalid access!");
462  return ShiftExtend.HasExplicitAmount;
463  }
464 
465  bool isImm() const override { return Kind == k_Immediate; }
466  bool isMem() const override { return false; }
467  bool isSImm9() const {
468  if (!isImm())
469  return false;
470  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
471  if (!MCE)
472  return false;
473  int64_t Val = MCE->getValue();
474  return (Val >= -256 && Val < 256);
475  }
476  bool isSImm10s8() const {
477  if (!isImm())
478  return false;
479  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
480  if (!MCE)
481  return false;
482  int64_t Val = MCE->getValue();
483  return (Val >= -4096 && Val < 4089 && (Val & 7) == 0);
484  }
485  bool isSImm7s4() const {
486  if (!isImm())
487  return false;
488  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
489  if (!MCE)
490  return false;
491  int64_t Val = MCE->getValue();
492  return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
493  }
494  bool isSImm7s8() const {
495  if (!isImm())
496  return false;
497  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
498  if (!MCE)
499  return false;
500  int64_t Val = MCE->getValue();
501  return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
502  }
503  bool isSImm7s16() const {
504  if (!isImm())
505  return false;
506  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
507  if (!MCE)
508  return false;
509  int64_t Val = MCE->getValue();
510  return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
511  }
512 
513  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
514  AArch64MCExpr::VariantKind ELFRefKind;
515  MCSymbolRefExpr::VariantKind DarwinRefKind;
516  int64_t Addend;
517  if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
518  Addend)) {
519  // If we don't understand the expression, assume the best and
520  // let the fixup and relocation code deal with it.
521  return true;
522  }
523 
524  if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
525  ELFRefKind == AArch64MCExpr::VK_LO12 ||
526  ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
527  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
528  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
529  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
530  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
531  ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
532  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
533  // Note that we don't range-check the addend. It's adjusted modulo page
534  // size when converted, so there is no "out of range" condition when using
535  // @pageoff.
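// For example, an addend of 0x2008 ends up contributing only its page offset (0x8) to the encoded immediate.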
536  return Addend >= 0 && (Addend % Scale) == 0;
537  } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
538  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
539  // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
540  return Addend == 0;
541  }
542 
543  return false;
544  }
545 
546  template <int Scale> bool isUImm12Offset() const {
547  if (!isImm())
548  return false;
549 
550  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
551  if (!MCE)
552  return isSymbolicUImm12Offset(getImm(), Scale);
553 
554  int64_t Val = MCE->getValue();
555  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
556  }
557 
558  template <int N, int M>
559  bool isImmInRange() const {
560  if (!isImm())
561  return false;
562  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
563  if (!MCE)
564  return false;
565  int64_t Val = MCE->getValue();
566  return (Val >= N && Val <= M);
567  }
568 
569  bool isLogicalImm32() const {
570  if (!isImm())
571  return false;
572  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
573  if (!MCE)
574  return false;
575  int64_t Val = MCE->getValue();
576  if (Val >> 32 != 0 && Val >> 32 != ~0LL)
577  return false;
578  Val &= 0xFFFFFFFF;
579  return AArch64_AM::isLogicalImmediate(Val, 32);
580  }
581 
582  bool isLogicalImm64() const {
583  if (!isImm())
584  return false;
585  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586  if (!MCE)
587  return false;
588  return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
589  }
590 
591  bool isLogicalImm32Not() const {
592  if (!isImm())
593  return false;
594  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595  if (!MCE)
596  return false;
597  int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
598  return AArch64_AM::isLogicalImmediate(Val, 32);
599  }
600 
601  bool isLogicalImm64Not() const {
602  if (!isImm())
603  return false;
604  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
605  if (!MCE)
606  return false;
607  return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
608  }
609 
610  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
611 
612  bool isAddSubImm() const {
613  if (!isShiftedImm() && !isImm())
614  return false;
615 
616  const MCExpr *Expr;
617 
618  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
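// For example, "add x0, x1, #1, lsl #12" adds 0x1000; any shift amount other than 0 or 12 is rejected.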
619  if (isShiftedImm()) {
620  unsigned Shift = ShiftedImm.ShiftAmount;
621  Expr = ShiftedImm.Val;
622  if (Shift != 0 && Shift != 12)
623  return false;
624  } else {
625  Expr = getImm();
626  }
627 
628  AArch64MCExpr::VariantKind ELFRefKind;
629  MCSymbolRefExpr::VariantKind DarwinRefKind;
630  int64_t Addend;
631  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
632  DarwinRefKind, Addend)) {
633  return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
634  || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
635  || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
636  || ELFRefKind == AArch64MCExpr::VK_LO12
637  || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
638  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
639  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
640  || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
641  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
642  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
643  || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
644  }
645 
646  // If it's a constant, it should be a real immediate in range:
647  if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
648  return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
649 
650  // If it's an expression, we hope for the best and let the fixup/relocation
651  // code deal with it.
652  return true;
653  }
654 
655  bool isAddSubImmNeg() const {
656  if (!isShiftedImm() && !isImm())
657  return false;
658 
659  const MCExpr *Expr;
660 
661  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
662  if (isShiftedImm()) {
663  unsigned Shift = ShiftedImm.ShiftAmount;
664  Expr = ShiftedImm.Val;
665  if (Shift != 0 && Shift != 12)
666  return false;
667  } else
668  Expr = getImm();
669 
670  // Otherwise it should be a real negative immediate in range:
671  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
672  return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
673  }
674 
675  bool isCondCode() const { return Kind == k_CondCode; }
676 
677  bool isSIMDImmType10() const {
678  if (!isImm())
679  return false;
680  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
681  if (!MCE)
682  return false;
683  return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
684  }
685 
686  template<int N>
687  bool isBranchTarget() const {
688  if (!isImm())
689  return false;
690  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
691  if (!MCE)
692  return true;
693  int64_t Val = MCE->getValue();
694  if (Val & 0x3)
695  return false;
696  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
697  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
698  }
699 
700  bool
701  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
702  if (!isImm())
703  return false;
704 
705  AArch64MCExpr::VariantKind ELFRefKind;
706  MCSymbolRefExpr::VariantKind DarwinRefKind;
707  int64_t Addend;
708  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
709  DarwinRefKind, Addend)) {
710  return false;
711  }
712  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
713  return false;
714 
715  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
716  if (ELFRefKind == AllowedModifiers[i])
717  return Addend == 0;
718  }
719 
720  return false;
721  }
722 
723  bool isMovZSymbolG3() const {
724  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
725  }
726 
727  bool isMovZSymbolG2() const {
728  return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
729  AArch64MCExpr::VK_TPREL_G2,
730  AArch64MCExpr::VK_DTPREL_G2});
731  }
732 
733  bool isMovZSymbolG1() const {
734  return isMovWSymbol({
735  AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
736  AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
737  AArch64MCExpr::VK_DTPREL_G1,
738  });
739  }
740 
741  bool isMovZSymbolG0() const {
742  return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
743  AArch64MCExpr::VK_TPREL_G0,
744  AArch64MCExpr::VK_DTPREL_G0});
745  }
746 
747  bool isMovKSymbolG3() const {
748  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
749  }
750 
751  bool isMovKSymbolG2() const {
752  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
753  }
754 
755  bool isMovKSymbolG1() const {
756  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
757  AArch64MCExpr::VK_TPREL_G1_NC,
758  AArch64MCExpr::VK_DTPREL_G1_NC});
759  }
760 
761  bool isMovKSymbolG0() const {
762  return isMovWSymbol(
763  {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_TPREL_G0_NC,
764  AArch64MCExpr::VK_DTPREL_G0_NC});
765  }
766 
767  template<int RegWidth, int Shift>
768  bool isMOVZMovAlias() const {
769  if (!isImm()) return false;
770 
771  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772  if (!CE) return false;
773  uint64_t Value = CE->getValue();
774 
775  return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
776  }
777 
778  template<int RegWidth, int Shift>
779  bool isMOVNMovAlias() const {
780  if (!isImm()) return false;
781 
782  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783  if (!CE) return false;
784  uint64_t Value = CE->getValue();
785 
786  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
787  }
788 
789  bool isFPImm() const { return Kind == k_FPImm; }
790  bool isBarrier() const { return Kind == k_Barrier; }
791  bool isSysReg() const { return Kind == k_SysReg; }
792 
793  bool isMRSSystemRegister() const {
794  if (!isSysReg()) return false;
795 
796  return SysReg.MRSReg != -1U;
797  }
798 
799  bool isMSRSystemRegister() const {
800  if (!isSysReg()) return false;
801  return SysReg.MSRReg != -1U;
802  }
803 
804  bool isSystemPStateFieldWithImm0_1() const {
805  if (!isSysReg()) return false;
806  return (SysReg.PStateField == AArch64PState::PAN ||
807  SysReg.PStateField == AArch64PState::UAO);
808  }
809 
810  bool isSystemPStateFieldWithImm0_15() const {
811  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
812  return SysReg.PStateField != -1U;
813  }
814 
815  bool isReg() const override {
816  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
817  }
818 
819  bool isNeonVectorReg() const {
820  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
821  }
822 
823  bool isNeonVectorRegLo() const {
824  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
825  AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
826  Reg.RegNum);
827  }
828 
829  template <unsigned Class = AArch64::ZPRRegClassID>
830  bool isSVEDataVectorReg() const {
831  return (Kind == k_Register && Reg.Kind == RegKind::SVEDataVector) &&
832  AArch64MCRegisterClasses[Class].contains(getReg());
833  }
834 
835  template <int ElementWidth> bool isSVEDataVectorRegOfWidth() const {
836  return isSVEDataVectorReg() &&
837  (ElementWidth == -1 || Reg.ElementWidth == ElementWidth);
838  }
839 
840  bool isGPR32as64() const {
841  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
842  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
843  }
844 
845  bool isWSeqPair() const {
846  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
847  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
848  Reg.RegNum);
849  }
850 
851  bool isXSeqPair() const {
852  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
853  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
854  Reg.RegNum);
855  }
856 
857  bool isGPR64sp0() const {
858  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
859  AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
860  }
861 
862  template<int64_t Angle, int64_t Remainder>
863  bool isComplexRotation() const {
864  if (!isImm()) return false;
865 
866  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
867  if (!CE) return false;
868  uint64_t Value = CE->getValue();
869 
870  return (Value % Angle == Remainder && Value <= 270);
871  }
872 
873  /// Is this a vector list with the type implicit (presumably attached to the
874  /// instruction itself)?
875  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
876  return Kind == k_VectorList && VectorList.Count == NumRegs &&
877  !VectorList.ElementKind;
878  }
879 
880  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
881  bool isTypedVectorList() const {
882  if (Kind != k_VectorList)
883  return false;
884  if (VectorList.Count != NumRegs)
885  return false;
886  if (VectorList.ElementKind != ElementKind)
887  return false;
888  return VectorList.NumElements == NumElements;
889  }
890 
891  bool isVectorIndex1() const {
892  return Kind == k_VectorIndex && VectorIndex.Val == 1;
893  }
894 
895  bool isVectorIndexB() const {
896  return Kind == k_VectorIndex && VectorIndex.Val < 16;
897  }
898 
899  bool isVectorIndexH() const {
900  return Kind == k_VectorIndex && VectorIndex.Val < 8;
901  }
902 
903  bool isVectorIndexS() const {
904  return Kind == k_VectorIndex && VectorIndex.Val < 4;
905  }
906 
907  bool isVectorIndexD() const {
908  return Kind == k_VectorIndex && VectorIndex.Val < 2;
909  }
910 
911  bool isToken() const override { return Kind == k_Token; }
912 
913  bool isTokenEqual(StringRef Str) const {
914  return Kind == k_Token && getToken() == Str;
915  }
916  bool isSysCR() const { return Kind == k_SysCR; }
917  bool isPrefetch() const { return Kind == k_Prefetch; }
918  bool isPSBHint() const { return Kind == k_PSBHint; }
919  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
920  bool isShifter() const {
921  if (!isShiftExtend())
922  return false;
923 
924  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
925  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
926  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
927  ST == AArch64_AM::MSL);
928  }
929  bool isExtend() const {
930  if (!isShiftExtend())
931  return false;
932 
933  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
934  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
935  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
936  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
937  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
938  ET == AArch64_AM::LSL) &&
939  getShiftExtendAmount() <= 4;
940  }
941 
942  bool isExtend64() const {
943  if (!isExtend())
944  return false;
945  // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
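// For example, "add x0, x1, w2, uxtw #2" is accepted here, while "add x0, x1, x2, sxtx" is left to the ExtendLSL64 operand class.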
946  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
947  return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
948  }
949 
950  bool isExtendLSL64() const {
951  if (!isExtend())
952  return false;
953  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
954  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
955  ET == AArch64_AM::LSL) &&
956  getShiftExtendAmount() <= 4;
957  }
958 
959  template<int Width> bool isMemXExtend() const {
960  if (!isExtend())
961  return false;
962  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
963  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
964  (getShiftExtendAmount() == Log2_32(Width / 8) ||
965  getShiftExtendAmount() == 0);
966  }
967 
968  template<int Width> bool isMemWExtend() const {
969  if (!isExtend())
970  return false;
971  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
972  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
973  (getShiftExtendAmount() == Log2_32(Width / 8) ||
974  getShiftExtendAmount() == 0);
975  }
976 
977  template <unsigned width>
978  bool isArithmeticShifter() const {
979  if (!isShifter())
980  return false;
981 
982  // An arithmetic shifter is LSL, LSR, or ASR.
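// For example, the "asr #3" in "add x0, x1, x2, asr #3".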
983  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
984  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
985  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
986  }
987 
988  template <unsigned width>
989  bool isLogicalShifter() const {
990  if (!isShifter())
991  return false;
992 
993  // A logical shifter is LSL, LSR, ASR or ROR.
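// For example, the "ror #7" in "and x0, x1, x2, ror #7".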
994  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
995  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
996  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
997  getShiftExtendAmount() < width;
998  }
999 
1000  bool isMovImm32Shifter() const {
1001  if (!isShifter())
1002  return false;
1003 
1004  // A 32-bit MOVi shifter is LSL of 0 or 16.
1005  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1006  if (ST != AArch64_AM::LSL)
1007  return false;
1008  uint64_t Val = getShiftExtendAmount();
1009  return (Val == 0 || Val == 16);
1010  }
1011 
1012  bool isMovImm64Shifter() const {
1013  if (!isShifter())
1014  return false;
1015 
1016  // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1017  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1018  if (ST != AArch64_AM::LSL)
1019  return false;
1020  uint64_t Val = getShiftExtendAmount();
1021  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1022  }
1023 
1024  bool isLogicalVecShifter() const {
1025  if (!isShifter())
1026  return false;
1027 
1028  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
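// For example, the "lsl #16" in "orr v0.4s, #0x12, lsl #16".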
1029  unsigned Shift = getShiftExtendAmount();
1030  return getShiftExtendType() == AArch64_AM::LSL &&
1031  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1032  }
1033 
1034  bool isLogicalVecHalfWordShifter() const {
1035  if (!isLogicalVecShifter())
1036  return false;
1037 
1038  // A logical vector half-word shifter is a left shift by 0 or 8.
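// For example, the "lsl #8" in "orr v0.8h, #0x12, lsl #8".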
1039  unsigned Shift = getShiftExtendAmount();
1040  return getShiftExtendType() == AArch64_AM::LSL &&
1041  (Shift == 0 || Shift == 8);
1042  }
1043 
1044  bool isMoveVecShifter() const {
1045  if (!isShiftExtend())
1046  return false;
1047 
1048  // A move vector shifter is an MSL (masking shift left) by 8 or 16.
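// For example, the "msl #8" in "movi v0.4s, #0xff, msl #8".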
1049  unsigned Shift = getShiftExtendAmount();
1050  return getShiftExtendType() == AArch64_AM::MSL &&
1051  (Shift == 8 || Shift == 16);
1052  }
1053 
1054  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1055  // to LDUR/STUR when the offset is not legal for the former but is for
1056  // the latter. As such, in addition to checking for being a legal unscaled
1057  // address, also check that it is not a legal scaled address. This avoids
1058  // ambiguity in the matcher.
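// For example, "ldr x0, [x1, #4]" has an offset that is not a multiple of 8 and so matches the unscaled LDUR form, while "ldr x0, [x1, #8]" keeps the scaled encoding.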
1059  template<int Width>
1060  bool isSImm9OffsetFB() const {
1061  return isSImm9() && !isUImm12Offset<Width / 8>();
1062  }
1063 
1064  bool isAdrpLabel() const {
1065  // Validation was handled during parsing, so we just sanity check that
1066  // something didn't go haywire.
1067  if (!isImm())
1068  return false;
1069 
1070  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1071  int64_t Val = CE->getValue();
1072  int64_t Min = - (4096 * (1LL << (21 - 1)));
1073  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1074  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1075  }
1076 
1077  return true;
1078  }
1079 
1080  bool isAdrLabel() const {
1081  // Validation was handled during parsing, so we just sanity check that
1082  // something didn't go haywire.
1083  if (!isImm())
1084  return false;
1085 
1086  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1087  int64_t Val = CE->getValue();
1088  int64_t Min = - (1LL << (21 - 1));
1089  int64_t Max = ((1LL << (21 - 1)) - 1);
1090  return Val >= Min && Val <= Max;
1091  }
1092 
1093  return true;
1094  }
1095 
1096  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1097  // Add as immediates when possible. Null MCExpr = 0.
1098  if (!Expr)
1099  Inst.addOperand(MCOperand::createImm(0));
1100  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1101  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1102  else
1103  Inst.addOperand(MCOperand::createExpr(Expr));
1104  }
1105 
1106  void addRegOperands(MCInst &Inst, unsigned N) const {
1107  assert(N == 1 && "Invalid number of operands!");
1108  Inst.addOperand(MCOperand::createReg(getReg()));
1109  }
1110 
1111  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1112  assert(N == 1 && "Invalid number of operands!");
1113  assert(
1114  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1115 
1116  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1117  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1118  RI->getEncodingValue(getReg()));
1119 
1120  Inst.addOperand(MCOperand::createReg(Reg));
1121  }
1122 
1123  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1124  assert(N == 1 && "Invalid number of operands!");
1125  assert(
1126  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1127  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1128  }
1129 
1130  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1131  assert(N == 1 && "Invalid number of operands!");
1132  assert(
1133  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1134  Inst.addOperand(MCOperand::createReg(getReg()));
1135  }
1136 
1137  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1138  assert(N == 1 && "Invalid number of operands!");
1139  Inst.addOperand(MCOperand::createReg(getReg()));
1140  }
1141 
1142  template <unsigned NumRegs>
1143  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1144  assert(N == 1 && "Invalid number of operands!");
1145  static const unsigned FirstRegs[] = { AArch64::D0,
1146  AArch64::D0_D1,
1147  AArch64::D0_D1_D2,
1148  AArch64::D0_D1_D2_D3 };
1149  unsigned FirstReg = FirstRegs[NumRegs - 1];
1150 
1151  Inst.addOperand(
1152  MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1153  }
1154 
1155  template <unsigned NumRegs>
1156  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1157  assert(N == 1 && "Invalid number of operands!");
1158  static const unsigned FirstRegs[] = { AArch64::Q0,
1159  AArch64::Q0_Q1,
1160  AArch64::Q0_Q1_Q2,
1161  AArch64::Q0_Q1_Q2_Q3 };
1162  unsigned FirstReg = FirstRegs[NumRegs - 1];
1163 
1164  Inst.addOperand(
1165  MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1166  }
1167 
1168  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1169  assert(N == 1 && "Invalid number of operands!");
1170  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1171  }
1172 
1173  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1174  assert(N == 1 && "Invalid number of operands!");
1175  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1176  }
1177 
1178  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1179  assert(N == 1 && "Invalid number of operands!");
1180  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1181  }
1182 
1183  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1184  assert(N == 1 && "Invalid number of operands!");
1185  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1186  }
1187 
1188  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1189  assert(N == 1 && "Invalid number of operands!");
1190  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1191  }
1192 
1193  void addImmOperands(MCInst &Inst, unsigned N) const {
1194  assert(N == 1 && "Invalid number of operands!");
1195  // If this is a pageoff symrefexpr with an addend, adjust the addend
1196  // to be only the page-offset portion. Otherwise, just add the expr
1197  // as-is.
1198  addExpr(Inst, getImm());
1199  }
1200 
1201  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1202  assert(N == 2 && "Invalid number of operands!");
1203  if (isShiftedImm()) {
1204  addExpr(Inst, getShiftedImmVal());
1205  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1206  } else {
1207  addExpr(Inst, getImm());
1208  Inst.addOperand(MCOperand::createImm(0));
1209  }
1210  }
1211 
1212  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1213  assert(N == 2 && "Invalid number of operands!");
1214 
1215  const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1216  const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1217  int64_t Val = -CE->getValue();
1218  unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1219 
1220  Inst.addOperand(MCOperand::createImm(Val));
1221  Inst.addOperand(MCOperand::createImm(ShiftAmt));
1222  }
1223 
1224  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1225  assert(N == 1 && "Invalid number of operands!");
1226  Inst.addOperand(MCOperand::createImm(getCondCode()));
1227  }
1228 
1229  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1230  assert(N == 1 && "Invalid number of operands!");
1231  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1232  if (!MCE)
1233  addExpr(Inst, getImm());
1234  else
1235  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1236  }
1237 
1238  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1239  addImmOperands(Inst, N);
1240  }
1241 
1242  template<int Scale>
1243  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1244  assert(N == 1 && "Invalid number of operands!");
1245  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1246 
1247  if (!MCE) {
1248  Inst.addOperand(MCOperand::createExpr(getImm()));
1249  return;
1250  }
1251  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1252  }
1253 
1254  void addSImm9Operands(MCInst &Inst, unsigned N) const {
1255  assert(N == 1 && "Invalid number of operands!");
1256  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1257  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1258  }
1259 
1260  void addSImm10s8Operands(MCInst &Inst, unsigned N) const {
1261  assert(N == 1 && "Invalid number of operands!");
1262  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1263  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1264  }
1265 
1266  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1267  assert(N == 1 && "Invalid number of operands!");
1268  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1269  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1270  }
1271 
1272  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1273  assert(N == 1 && "Invalid number of operands!");
1274  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1275  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1276  }
1277 
1278  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1279  assert(N == 1 && "Invalid number of operands!");
1280  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1281  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1282  }
1283 
1284  void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1285  assert(N == 1 && "Invalid number of operands!");
1286  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1287  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1288  }
1289 
1290  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1291  assert(N == 1 && "Invalid number of operands!");
1292  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1293  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1294  }
1295 
1296  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1297  assert(N == 1 && "Invalid number of operands!");
1298  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1299  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1300  }
1301 
1302  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1303  assert(N == 1 && "Invalid number of operands!");
1304  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1305  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1306  }
1307 
1308  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1309  assert(N == 1 && "Invalid number of operands!");
1310  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1311  assert(MCE && "Invalid constant immediate operand!");
1312  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313  }
1314 
1315  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1316  assert(N == 1 && "Invalid number of operands!");
1317  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1318  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1319  }
1320 
1321  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1322  assert(N == 1 && "Invalid number of operands!");
1323  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1324  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1325  }
1326 
1327  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1328  assert(N == 1 && "Invalid number of operands!");
1329  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1330  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1331  }
1332 
1333  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1334  assert(N == 1 && "Invalid number of operands!");
1335  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1337  }
1338 
1339  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1340  assert(N == 1 && "Invalid number of operands!");
1341  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1342  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1343  }
1344 
1345  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1346  assert(N == 1 && "Invalid number of operands!");
1347  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1348  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1349  }
1350 
1351  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1352  assert(N == 1 && "Invalid number of operands!");
1353  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1354  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1355  }
1356 
1357  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1358  assert(N == 1 && "Invalid number of operands!");
1359  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1361  }
1362 
1363  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1364  assert(N == 1 && "Invalid number of operands!");
1365  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1366  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1367  }
1368 
1369  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1370  assert(N == 1 && "Invalid number of operands!");
1371  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1372  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1373  }
1374 
1375  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1376  assert(N == 1 && "Invalid number of operands!");
1377  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1378  uint64_t encoding =
1379  AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1380  Inst.addOperand(MCOperand::createImm(encoding));
1381  }
1382 
1383  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1384  assert(N == 1 && "Invalid number of operands!");
1385  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1386  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1387  Inst.addOperand(MCOperand::createImm(encoding));
1388  }
1389 
1390  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1391  assert(N == 1 && "Invalid number of operands!");
1392  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1393  int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1394  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1395  Inst.addOperand(MCOperand::createImm(encoding));
1396  }
1397 
1398  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1399  assert(N == 1 && "Invalid number of operands!");
1400  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1401  uint64_t encoding =
1402  AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1403  Inst.addOperand(MCOperand::createImm(encoding));
1404  }
1405 
1406  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1407  assert(N == 1 && "Invalid number of operands!");
1408  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1409  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1410  Inst.addOperand(MCOperand::createImm(encoding));
1411  }
1412 
1413  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1414  // Branch operands don't encode the low bits, so shift them off
1415  // here. If it's a label, however, just put it on directly as there's
1416  // not enough information now to do anything.
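// For example, a resolved byte offset of +8 is encoded as the immediate 2.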
1417  assert(N == 1 && "Invalid number of operands!");
1418  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1419  if (!MCE) {
1420  addExpr(Inst, getImm());
1421  return;
1422  }
1423  assert(MCE && "Invalid constant immediate operand!");
1424  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1425  }
1426 
1427  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1428  // Branch operands don't encode the low bits, so shift them off
1429  // here. If it's a label, however, just put it on directly as there's
1430  // not enough information now to do anything.
1431  assert(N == 1 && "Invalid number of operands!");
1432  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1433  if (!MCE) {
1434  addExpr(Inst, getImm());
1435  return;
1436  }
1437  assert(MCE && "Invalid constant immediate operand!");
1438  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1439  }
1440 
1441  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1442  // Branch operands don't encode the low bits, so shift them off
1443  // here. If it's a label, however, just put it on directly as there's
1444  // not enough information now to do anything.
1445  assert(N == 1 && "Invalid number of operands!");
1446  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1447  if (!MCE) {
1448  addExpr(Inst, getImm());
1449  return;
1450  }
1451  assert(MCE && "Invalid constant immediate operand!");
1452  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1453  }
1454 
1455  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1456  assert(N == 1 && "Invalid number of operands!");
1457  Inst.addOperand(MCOperand::createImm(getFPImm()));
1458  }
1459 
1460  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1461  assert(N == 1 && "Invalid number of operands!");
1462  Inst.addOperand(MCOperand::createImm(getBarrier()));
1463  }
1464 
1465  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1466  assert(N == 1 && "Invalid number of operands!");
1467 
1468  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1469  }
1470 
1471  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1472  assert(N == 1 && "Invalid number of operands!");
1473 
1474  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1475  }
1476 
1477  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1478  assert(N == 1 && "Invalid number of operands!");
1479 
1480  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1481  }
1482 
1483  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1484  assert(N == 1 && "Invalid number of operands!");
1485 
1486  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1487  }
1488 
1489  void addSysCROperands(MCInst &Inst, unsigned N) const {
1490  assert(N == 1 && "Invalid number of operands!");
1491  Inst.addOperand(MCOperand::createImm(getSysCR()));
1492  }
1493 
1494  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1495  assert(N == 1 && "Invalid number of operands!");
1496  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1497  }
1498 
1499  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1500  assert(N == 1 && "Invalid number of operands!");
1501  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1502  }
1503 
1504  void addShifterOperands(MCInst &Inst, unsigned N) const {
1505  assert(N == 1 && "Invalid number of operands!");
1506  unsigned Imm =
1507  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1508  Inst.addOperand(MCOperand::createImm(Imm));
1509  }
1510 
1511  void addExtendOperands(MCInst &Inst, unsigned N) const {
1512  assert(N == 1 && "Invalid number of operands!");
1513  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1514  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1515  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1516  Inst.addOperand(MCOperand::createImm(Imm));
1517  }
1518 
1519  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1520  assert(N == 1 && "Invalid number of operands!");
1521  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1522  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1523  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1524  Inst.addOperand(MCOperand::createImm(Imm));
1525  }
1526 
1527  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1528  assert(N == 2 && "Invalid number of operands!");
1529  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1530  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1531  Inst.addOperand(MCOperand::createImm(IsSigned));
1532  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1533  }
1534 
1535  // For 8-bit load/store instructions with a register offset, both the
1536  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1537  // they're disambiguated by whether the shift was explicit or implicit rather
1538  // than its size.
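// For example, "ldrb w0, [x1, x2, lsl #0]" selects the "DoShift" variant and "ldrb w0, [x1, x2]" the "NoShift" one, even though both shift by 0.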
1539  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1540  assert(N == 2 && "Invalid number of operands!");
1541  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1542  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1543  Inst.addOperand(MCOperand::createImm(IsSigned));
1544  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1545  }
1546 
1547  template<int Shift>
1548  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1549  assert(N == 1 && "Invalid number of operands!");
1550 
1551  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1552  uint64_t Value = CE->getValue();
1553  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1554  }
1555 
1556  template<int Shift>
1557  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1558  assert(N == 1 && "Invalid number of operands!");
1559 
1560  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1561  uint64_t Value = CE->getValue();
1562  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1563  }
1564 
1565  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1566  assert(N == 1 && "Invalid number of operands!");
1567  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1568  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1569  }
1570 
1571  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1572  assert(N == 1 && "Invalid number of operands!");
1573  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1574  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1575  }
1576 
1577  void print(raw_ostream &OS) const override;
1578 
1579  static std::unique_ptr<AArch64Operand>
1580  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1581  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1582  Op->Tok.Data = Str.data();
1583  Op->Tok.Length = Str.size();
1584  Op->Tok.IsSuffix = IsSuffix;
1585  Op->StartLoc = S;
1586  Op->EndLoc = S;
1587  return Op;
1588  }
1589 
1590  static std::unique_ptr<AArch64Operand>
1591  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx) {
1592  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1593  Op->Reg.RegNum = RegNum;
1594  Op->Reg.Kind = Kind;
1595  Op->StartLoc = S;
1596  Op->EndLoc = E;
1597  return Op;
1598  }
1599 
1600  static std::unique_ptr<AArch64Operand>
1601  CreateReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1602  SMLoc S, SMLoc E, MCContext &Ctx) {
1603  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1604  Op->Reg.RegNum = RegNum;
1605  Op->Reg.ElementWidth = ElementWidth;
1606  Op->Reg.Kind = Kind;
1607  Op->StartLoc = S;
1608  Op->EndLoc = E;
1609  return Op;
1610  }
1611 
1612  static std::unique_ptr<AArch64Operand>
1613  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1614  char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1615  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1616  Op->VectorList.RegNum = RegNum;
1617  Op->VectorList.Count = Count;
1618  Op->VectorList.NumElements = NumElements;
1619  Op->VectorList.ElementKind = ElementKind;
1620  Op->StartLoc = S;
1621  Op->EndLoc = E;
1622  return Op;
1623  }
1624 
1625  static std::unique_ptr<AArch64Operand>
1626  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1627  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1628  Op->VectorIndex.Val = Idx;
1629  Op->StartLoc = S;
1630  Op->EndLoc = E;
1631  return Op;
1632  }
1633 
1634  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1635  SMLoc E, MCContext &Ctx) {
1636  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1637  Op->Imm.Val = Val;
1638  Op->StartLoc = S;
1639  Op->EndLoc = E;
1640  return Op;
1641  }
1642 
1643  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1644  unsigned ShiftAmount,
1645  SMLoc S, SMLoc E,
1646  MCContext &Ctx) {
1647  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1648  Op->ShiftedImm.Val = Val;
1649  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1650  Op->StartLoc = S;
1651  Op->EndLoc = E;
1652  return Op;
1653  }
1654 
1655  static std::unique_ptr<AArch64Operand>
1656  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1657  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1658  Op->CondCode.Code = Code;
1659  Op->StartLoc = S;
1660  Op->EndLoc = E;
1661  return Op;
1662  }
1663 
1664  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1665  MCContext &Ctx) {
1666  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1667  Op->FPImm.Val = Val;
1668  Op->StartLoc = S;
1669  Op->EndLoc = S;
1670  return Op;
1671  }
1672 
1673  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1674  StringRef Str,
1675  SMLoc S,
1676  MCContext &Ctx) {
1677  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1678  Op->Barrier.Val = Val;
1679  Op->Barrier.Data = Str.data();
1680  Op->Barrier.Length = Str.size();
1681  Op->StartLoc = S;
1682  Op->EndLoc = S;
1683  return Op;
1684  }
1685 
1686  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1687  uint32_t MRSReg,
1688  uint32_t MSRReg,
1689  uint32_t PStateField,
1690  MCContext &Ctx) {
1691  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1692  Op->SysReg.Data = Str.data();
1693  Op->SysReg.Length = Str.size();
1694  Op->SysReg.MRSReg = MRSReg;
1695  Op->SysReg.MSRReg = MSRReg;
1696  Op->SysReg.PStateField = PStateField;
1697  Op->StartLoc = S;
1698  Op->EndLoc = S;
1699  return Op;
1700  }
1701 
1702  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1703  SMLoc E, MCContext &Ctx) {
1704  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1705  Op->SysCRImm.Val = Val;
1706  Op->StartLoc = S;
1707  Op->EndLoc = E;
1708  return Op;
1709  }
1710 
1711  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1712  StringRef Str,
1713  SMLoc S,
1714  MCContext &Ctx) {
1715  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1716  Op->Prefetch.Val = Val;
1717  Op->Prefetch.Data = Str.data();
1718  Op->Prefetch.Length = Str.size();
1719  Op->StartLoc = S;
1720  Op->EndLoc = S;
1721  return Op;
1722  }
1723 
1724  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1725  StringRef Str,
1726  SMLoc S,
1727  MCContext &Ctx) {
1728  auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1729  Op->PSBHint.Val = Val;
1730  Op->PSBHint.Data = Str.data();
1731  Op->PSBHint.Length = Str.size();
1732  Op->StartLoc = S;
1733  Op->EndLoc = S;
1734  return Op;
1735  }
1736 
1737  static std::unique_ptr<AArch64Operand>
1738  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1739  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1740  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1741  Op->ShiftExtend.Type = ShOp;
1742  Op->ShiftExtend.Amount = Val;
1743  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1744  Op->StartLoc = S;
1745  Op->EndLoc = E;
1746  return Op;
1747  }
1748 };
1749 
1750 } // end anonymous namespace.
1751 
1752 void AArch64Operand::print(raw_ostream &OS) const {
1753  switch (Kind) {
1754  case k_FPImm:
1755  OS << "<fpimm " << getFPImm() << "("
1756  << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1757  break;
1758  case k_Barrier: {
1759  StringRef Name = getBarrierName();
1760  if (!Name.empty())
1761  OS << "<barrier " << Name << ">";
1762  else
1763  OS << "<barrier invalid #" << getBarrier() << ">";
1764  break;
1765  }
1766  case k_Immediate:
1767  OS << *getImm();
1768  break;
1769  case k_ShiftedImm: {
1770  unsigned Shift = getShiftedImmShift();
1771  OS << "<shiftedimm ";
1772  OS << *getShiftedImmVal();
1773  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1774  break;
1775  }
1776  case k_CondCode:
1777  OS << "<condcode " << getCondCode() << ">";
1778  break;
1779  case k_Register:
1780  OS << "<register " << getReg() << ">";
1781  break;
1782  case k_VectorList: {
1783  OS << "<vectorlist ";
1784  unsigned Reg = getVectorListStart();
1785  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1786  OS << Reg + i << " ";
1787  OS << ">";
1788  break;
1789  }
1790  case k_VectorIndex:
1791  OS << "<vectorindex " << getVectorIndex() << ">";
1792  break;
1793  case k_SysReg:
1794  OS << "<sysreg: " << getSysReg() << '>';
1795  break;
1796  case k_Token:
1797  OS << "'" << getToken() << "'";
1798  break;
1799  case k_SysCR:
1800  OS << "c" << getSysCR();
1801  break;
1802  case k_Prefetch: {
1803  StringRef Name = getPrefetchName();
1804  if (!Name.empty())
1805  OS << "<prfop " << Name << ">";
1806  else
1807  OS << "<prfop invalid #" << getPrefetch() << ">";
1808  break;
1809  }
1810  case k_PSBHint:
1811  OS << getPSBHintName();
1812  break;
1813  case k_ShiftExtend:
1814  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1815  << getShiftExtendAmount();
1816  if (!hasShiftExtendAmount())
1817  OS << "<imp>";
1818  OS << '>';
1819  break;
1820  }
1821 }
1822 
1823 /// @name Auto-generated Match Functions
1824 /// {
1825 
1826 static unsigned MatchRegisterName(StringRef Name);
1827 
1828 /// }
1829 
1830 static unsigned MatchNeonVectorRegName(StringRef Name) {
1831  return StringSwitch<unsigned>(Name.lower())
1832  .Case("v0", AArch64::Q0)
1833  .Case("v1", AArch64::Q1)
1834  .Case("v2", AArch64::Q2)
1835  .Case("v3", AArch64::Q3)
1836  .Case("v4", AArch64::Q4)
1837  .Case("v5", AArch64::Q5)
1838  .Case("v6", AArch64::Q6)
1839  .Case("v7", AArch64::Q7)
1840  .Case("v8", AArch64::Q8)
1841  .Case("v9", AArch64::Q9)
1842  .Case("v10", AArch64::Q10)
1843  .Case("v11", AArch64::Q11)
1844  .Case("v12", AArch64::Q12)
1845  .Case("v13", AArch64::Q13)
1846  .Case("v14", AArch64::Q14)
1847  .Case("v15", AArch64::Q15)
1848  .Case("v16", AArch64::Q16)
1849  .Case("v17", AArch64::Q17)
1850  .Case("v18", AArch64::Q18)
1851  .Case("v19", AArch64::Q19)
1852  .Case("v20", AArch64::Q20)
1853  .Case("v21", AArch64::Q21)
1854  .Case("v22", AArch64::Q22)
1855  .Case("v23", AArch64::Q23)
1856  .Case("v24", AArch64::Q24)
1857  .Case("v25", AArch64::Q25)
1858  .Case("v26", AArch64::Q26)
1859  .Case("v27", AArch64::Q27)
1860  .Case("v28", AArch64::Q28)
1861  .Case("v29", AArch64::Q29)
1862  .Case("v30", AArch64::Q30)
1863  .Case("v31", AArch64::Q31)
1864  .Default(0);
1865 }
1866 
1867 static bool isValidVectorKind(StringRef Name) {
1868  return StringSwitch<bool>(Name.lower())
1869  .Case(".8b", true)
1870  .Case(".16b", true)
1871  .Case(".4h", true)
1872  .Case(".8h", true)
1873  .Case(".2s", true)
1874  .Case(".4s", true)
1875  .Case(".1d", true)
1876  .Case(".2d", true)
1877  .Case(".1q", true)
1878  // Accept the width neutral ones, too, for verbose syntax. If those
1879  // aren't used in the right places, the token operand won't match so
1880  // all will work out.
1881  .Case(".b", true)
1882  .Case(".h", true)
1883  .Case(".s", true)
1884  .Case(".d", true)
1885  // Needed for fp16 scalar pairwise reductions
1886  .Case(".2h", true)
1887  // another special case for the ARMv8.2a dot product operand
1888  .Case(".4b", true)
1889  .Default(false);
1890 }
1891 
1892 static unsigned matchSVEDataVectorRegName(StringRef Name) {
1893  return StringSwitch<unsigned>(Name.lower())
1894  .Case("z0", AArch64::Z0)
1895  .Case("z1", AArch64::Z1)
1896  .Case("z2", AArch64::Z2)
1897  .Case("z3", AArch64::Z3)
1898  .Case("z4", AArch64::Z4)
1899  .Case("z5", AArch64::Z5)
1900  .Case("z6", AArch64::Z6)
1901  .Case("z7", AArch64::Z7)
1902  .Case("z8", AArch64::Z8)
1903  .Case("z9", AArch64::Z9)
1904  .Case("z10", AArch64::Z10)
1905  .Case("z11", AArch64::Z11)
1906  .Case("z12", AArch64::Z12)
1907  .Case("z13", AArch64::Z13)
1908  .Case("z14", AArch64::Z14)
1909  .Case("z15", AArch64::Z15)
1910  .Case("z16", AArch64::Z16)
1911  .Case("z17", AArch64::Z17)
1912  .Case("z18", AArch64::Z18)
1913  .Case("z19", AArch64::Z19)
1914  .Case("z20", AArch64::Z20)
1915  .Case("z21", AArch64::Z21)
1916  .Case("z22", AArch64::Z22)
1917  .Case("z23", AArch64::Z23)
1918  .Case("z24", AArch64::Z24)
1919  .Case("z25", AArch64::Z25)
1920  .Case("z26", AArch64::Z26)
1921  .Case("z27", AArch64::Z27)
1922  .Case("z28", AArch64::Z28)
1923  .Case("z29", AArch64::Z29)
1924  .Case("z30", AArch64::Z30)
1925  .Case("z31", AArch64::Z31)
1926  .Default(0);
1927 }
1928 
1929 static bool isValidSVEKind(StringRef Name) {
1930  return StringSwitch<bool>(Name.lower())
1931  .Case(".b", true)
1932  .Case(".h", true)
1933  .Case(".s", true)
1934  .Case(".d", true)
1935  .Case(".q", true)
1936  .Default(false);
1937 }
1938 
1939 static bool isSVEDataVectorRegister(StringRef Name) {
1940  return Name[0] == 'z';
1941 }
1942 
1943 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1944  char &ElementKind) {
1945  assert(isValidVectorKind(Name));
1946 
1947  ElementKind = Name.lower()[Name.size() - 1];
1948  NumElements = 0;
1949 
1950  if (Name.size() == 2)
1951  return;
1952 
1953  // Parse the lane count
1954  Name = Name.drop_front();
1955  while (isdigit(Name.front())) {
1956  NumElements = 10 * NumElements + (Name.front() - '0');
1957  Name = Name.drop_front();
1958  }
1959 }
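
 // A minimal, standalone sketch of the same suffix decomposition using only
 // standard C++ types (the helper name is illustrative and not part of this
 // parser). Given a lowercase suffix such as ".4s" it yields {4, 's'}; a
 // width-neutral suffix such as ".b" yields {0, 'b'}.
 static void decodeVectorSuffixExample(const std::string &Suffix,
                                       unsigned &NumElements,
                                       char &ElementKind) {
   assert(Suffix.size() >= 2 && Suffix[0] == '.' && "expected '.<kind>' suffix");
   ElementKind = Suffix.back(); // The last character names the element type.
   NumElements = 0;
   for (size_t I = 1; I + 1 < Suffix.size(); ++I) // Digits, if any, in between.
     NumElements = NumElements * 10 + unsigned(Suffix[I] - '0');
 }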
1960 
1961 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1962  SMLoc &EndLoc) {
1963  StartLoc = getLoc();
1964  RegNo = tryParseRegister();
1965  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1966  return (RegNo == (unsigned)-1);
1967 }
1968 
1969 // Matches a register name or register alias previously defined by '.req'
1970 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1971  RegKind Kind) {
1972  unsigned RegNum;
1973  switch (Kind) {
1974  case RegKind::Scalar:
1975  RegNum = MatchRegisterName(Name);
1976  break;
1977  case RegKind::NeonVector:
1978  RegNum = MatchNeonVectorRegName(Name);
1979  break;
1980  case RegKind::SVEDataVector:
1981  RegNum = matchSVEDataVectorRegName(Name);
1982  break;
1983  }
1984 
1985  if (!RegNum) {
1986  // Check for aliases registered via .req. Canonicalize to lower case.
1987  // That's more consistent since register names are case insensitive, and
1988  // it's how the original entry was passed in from MC/MCParser/AsmParser.
1989  auto Entry = RegisterReqs.find(Name.lower());
1990  if (Entry == RegisterReqs.end())
1991  return 0;
1992 
1993  // set RegNum if the match is the right kind of register
1994  if (Kind == Entry->getValue().first)
1995  RegNum = Entry->getValue().second;
1996  }
1997  return RegNum;
1998 }
1999 
2000 /// tryParseRegister - Try to parse a register name. The token must be an
2001 /// Identifier when called, and if it is a register name the token is eaten and
2002 /// its register number is returned; otherwise -1 is returned.
2003 int AArch64AsmParser::tryParseRegister() {
2004  MCAsmParser &Parser = getParser();
2005  const AsmToken &Tok = Parser.getTok();
2006  if (Tok.isNot(AsmToken::Identifier))
2007  return -1;
2008 
2009  std::string lowerCase = Tok.getString().lower();
2010  if (isSVEDataVectorRegister(lowerCase))
2011  return -1;
2012 
2013  unsigned RegNum = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2014  // Also handle a few aliases of registers.
2015  if (RegNum == 0)
2016  RegNum = StringSwitch<unsigned>(lowerCase)
2017  .Case("fp", AArch64::FP)
2018  .Case("lr", AArch64::LR)
2019  .Case("x31", AArch64::XZR)
2020  .Case("w31", AArch64::WZR)
2021  .Default(0);
2022 
2023  if (RegNum == 0)
2024  return -1;
2025 
2026  Parser.Lex(); // Eat identifier token.
2027  return RegNum;
2028 }
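
 // For reference, a standalone sketch of the alias table above (illustrative
 // string mapping only): "fp" and "lr" are friendly names for x29 and x30, and
 // "x31"/"w31" are accepted spellings of the zero registers.
 static std::string canonicalScalarAliasExample(const std::string &Name) {
   if (Name == "fp")  return "x29"; // Frame pointer.
   if (Name == "lr")  return "x30"; // Link register.
   if (Name == "x31") return "xzr"; // 64-bit zero register.
   if (Name == "w31") return "wzr"; // 32-bit zero register.
   return Name;                     // Already a canonical name.
 }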
2029 
2030 /// tryMatchVectorRegister - Try to parse a vector register name with optional
2031 /// kind specifier. If it is a register specifier, eat the token and return it.
2032 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
2033  MCAsmParser &Parser = getParser();
2034  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2035  TokError("vector register expected");
2036  return -1;
2037  }
2038 
2039  StringRef Name = Parser.getTok().getString();
2040  // If there is a kind specifier, it's separated from the register name by
2041  // a '.'.
2042  size_t Start = 0, Next = Name.find('.');
2043  StringRef Head = Name.slice(Start, Next);
2044  unsigned RegNum = matchRegisterNameAlias(Head, RegKind::NeonVector);
2045 
2046  if (RegNum) {
2047  if (Next != StringRef::npos) {
2048  Kind = Name.slice(Next, StringRef::npos);
2049  if (!isValidVectorKind(Kind)) {
2050  TokError("invalid vector kind qualifier");
2051  return -1;
2052  }
2053  }
2054  Parser.Lex(); // Eat the register token.
2055  return RegNum;
2056  }
2057 
2058  if (expected)
2059  TokError("vector register expected");
2060  return -1;
2061 }
2062 
2063 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2064 OperandMatchResultTy
2065 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2066  MCAsmParser &Parser = getParser();
2067  SMLoc S = getLoc();
2068 
2069  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2070  Error(S, "Expected cN operand where 0 <= N <= 15");
2071  return MatchOperand_ParseFail;
2072  }
2073 
2074  StringRef Tok = Parser.getTok().getIdentifier();
2075  if (Tok[0] != 'c' && Tok[0] != 'C') {
2076  Error(S, "Expected cN operand where 0 <= N <= 15");
2077  return MatchOperand_ParseFail;
2078  }
2079 
2080  uint32_t CRNum;
2081  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2082  if (BadNum || CRNum > 15) {
2083  Error(S, "Expected cN operand where 0 <= N <= 15");
2084  return MatchOperand_ParseFail;
2085  }
2086 
2087  Parser.Lex(); // Eat identifier token.
2088  Operands.push_back(
2089  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2090  return MatchOperand_Success;
2091 }
2092 
2093 /// tryParsePrefetch - Try to parse a prefetch operand.
2094 OperandMatchResultTy
2095 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2096  MCAsmParser &Parser = getParser();
2097  SMLoc S = getLoc();
2098  const AsmToken &Tok = Parser.getTok();
2099  // Either an identifier for named values or a 5-bit immediate.
2100  // Eat optional hash.
2101  if (parseOptionalToken(AsmToken::Hash) ||
2102  Tok.is(AsmToken::Integer)) {
2103  const MCExpr *ImmVal;
2104  if (getParser().parseExpression(ImmVal))
2105  return MatchOperand_ParseFail;
2106 
2107  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2108  if (!MCE) {
2109  TokError("immediate value expected for prefetch operand");
2110  return MatchOperand_ParseFail;
2111  }
2112  unsigned prfop = MCE->getValue();
2113  if (prfop > 31) {
2114  TokError("prefetch operand out of range, [0,31] expected");
2115  return MatchOperand_ParseFail;
2116  }
2117 
2118  auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
2119  Operands.push_back(AArch64Operand::CreatePrefetch(
2120  prfop, PRFM ? PRFM->Name : "", S, getContext()));
2121  return MatchOperand_Success;
2122  }
2123 
2124  if (Tok.isNot(AsmToken::Identifier)) {
2125  TokError("pre-fetch hint expected");
2126  return MatchOperand_ParseFail;
2127  }
2128 
2129  auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
2130  if (!PRFM) {
2131  TokError("pre-fetch hint expected");
2132  return MatchOperand_ParseFail;
2133  }
2134 
2135  Parser.Lex(); // Eat identifier token.
2136  Operands.push_back(AArch64Operand::CreatePrefetch(
2137  PRFM->Encoding, Tok.getString(), S, getContext()));
2138  return MatchOperand_Success;
2139 }
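
 // A standalone sketch of how the 5-bit prfop value above decomposes for the
 // named hints (field layout as described for PRFM; the helper name is
 // illustrative): bits [4:3] select the type (PLD/PLI/PST), bits [2:1] the
 // target cache level (L1/L2/L3), and bit [0] the policy (KEEP/STRM). For
 // example pldl1keep is #0 and pstl1strm is #17.
 static unsigned makePrfopExample(unsigned Type, unsigned Target,
                                  unsigned Policy) {
   assert(Type <= 2 && Target <= 2 && Policy <= 1 && "prfop field out of range");
   return (Type << 3) | (Target << 1) | Policy; // Always fits in 5 bits.
 }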
2140 
2141 /// tryParsePSBHint - Try to parse a PSB hint operand, which maps to the HINT instruction.
2142 OperandMatchResultTy
2143 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2144  MCAsmParser &Parser = getParser();
2145  SMLoc S = getLoc();
2146  const AsmToken &Tok = Parser.getTok();
2147  if (Tok.isNot(AsmToken::Identifier)) {
2148  TokError("invalid operand for instruction");
2149  return MatchOperand_ParseFail;
2150  }
2151 
2152  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2153  if (!PSB) {
2154  TokError("invalid operand for instruction");
2155  return MatchOperand_ParseFail;
2156  }
2157 
2158  Parser.Lex(); // Eat identifier token.
2159  Operands.push_back(AArch64Operand::CreatePSBHint(
2160  PSB->Encoding, Tok.getString(), S, getContext()));
2161  return MatchOperand_Success;
2162 }
2163 
2164 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2165 /// instruction.
2166 OperandMatchResultTy
2167 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2168  MCAsmParser &Parser = getParser();
2169  SMLoc S = getLoc();
2170  const MCExpr *Expr;
2171 
2172  if (Parser.getTok().is(AsmToken::Hash)) {
2173  Parser.Lex(); // Eat hash token.
2174  }
2175 
2176  if (parseSymbolicImmVal(Expr))
2177  return MatchOperand_ParseFail;
2178 
2179  AArch64MCExpr::VariantKind ELFRefKind;
2180  MCSymbolRefExpr::VariantKind DarwinRefKind;
2181  int64_t Addend;
2182  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2183  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2184  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2185  // No modifier was specified at all; this is the syntax for an ELF basic
2186  // ADRP relocation (unfortunately).
2187  Expr =
2188  AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2189  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2190  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2191  Addend != 0) {
2192  Error(S, "gotpage label reference not allowed an addend");
2193  return MatchOperand_ParseFail;
2194  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2195  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2196  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2197  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2198  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2199  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2200  // The operand must be an @page or @gotpage qualified symbolref.
2201  Error(S, "page or gotpage label reference expected");
2202  return MatchOperand_ParseFail;
2203  }
2204  }
2205 
2206  // We have either a label reference possibly with addend or an immediate. The
2207  // addend is a raw value here. The linker will adjust it to only reference the
2208  // page.
2209  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2210  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2211 
2212  return MatchOperand_Success;
2213 }
2214 
2215 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2216 /// instruction.
2217 OperandMatchResultTy
2218 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2219  SMLoc S = getLoc();
2220  const MCExpr *Expr;
2221 
2222  parseOptionalToken(AsmToken::Hash);
2223  if (getParser().parseExpression(Expr))
2224  return MatchOperand_ParseFail;
2225 
2226  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2227  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2228 
2229  return MatchOperand_Success;
2230 }
2231 
2232 /// tryParseFPImm - A floating point immediate expression operand.
2233 OperandMatchResultTy
2234 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2235  MCAsmParser &Parser = getParser();
2236  SMLoc S = getLoc();
2237 
2238  bool Hash = parseOptionalToken(AsmToken::Hash);
2239 
2240  // Handle negation, as that still comes through as a separate token.
2241  bool isNegative = parseOptionalToken(AsmToken::Minus);
2242 
2243  const AsmToken &Tok = Parser.getTok();
2244  if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) {
2245  int64_t Val;
2246  if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) {
2247  Val = Tok.getIntVal();
2248  if (Val > 255 || Val < 0) {
2249  TokError("encoded floating point value out of range");
2250  return MatchOperand_ParseFail;
2251  }
2252  } else {
2253  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2254  if (isNegative)
2255  RealVal.changeSign();
2256 
2257  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2258  Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2259 
2260  // Check for out of range values. As an exception we let Zero through,
2261  // but as tokens instead of an FPImm so that it can be matched by the
2262  // appropriate alias if one exists.
2263  if (RealVal.isPosZero()) {
2264  Parser.Lex(); // Eat the token.
2265  Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext()));
2266  Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext()));
2267  return MatchOperand_Success;
2268  } else if (Val == -1) {
2269  TokError("expected compatible register or floating-point constant");
2270  return MatchOperand_ParseFail;
2271  }
2272  }
2273  Parser.Lex(); // Eat the token.
2274  Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2275  return MatchOperand_Success;
2276  }
2277 
2278  if (!Hash)
2279  return MatchOperand_NoMatch;
2280 
2281  TokError("invalid floating point immediate");
2282  return MatchOperand_ParseFail;
2283 }
2284 
2285 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2286 OperandMatchResultTy
2287 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2288  MCAsmParser &Parser = getParser();
2289  SMLoc S = getLoc();
2290 
2291  if (Parser.getTok().is(AsmToken::Hash))
2292  Parser.Lex(); // Eat '#'
2293  else if (Parser.getTok().isNot(AsmToken::Integer))
2294  // Operand should start with '#' or be an integer; otherwise it is not a match.
2295  return MatchOperand_NoMatch;
2296 
2297  const MCExpr *Imm;
2298  if (parseSymbolicImmVal(Imm))
2299  return MatchOperand_ParseFail;
2300  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2301  uint64_t ShiftAmount = 0;
2302  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2303  if (MCE) {
2304  int64_t Val = MCE->getValue();
2305  if (Val > 0xfff && (Val & 0xfff) == 0) {
2306  Imm = MCConstantExpr::create(Val >> 12, getContext());
2307  ShiftAmount = 12;
2308  }
2309  }
2310  SMLoc E = Parser.getTok().getLoc();
2311  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2312  getContext()));
2313  return MatchOperand_Success;
2314  }
2315 
2316  // Eat ','
2317  Parser.Lex();
2318 
2319  // The optional operand must be "lsl #N" where N is non-negative.
2320  if (!Parser.getTok().is(AsmToken::Identifier) ||
2321  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2322  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2323  return MatchOperand_ParseFail;
2324  }
2325 
2326  // Eat 'lsl'
2327  Parser.Lex();
2328 
2329  parseOptionalToken(AsmToken::Hash);
2330 
2331  if (Parser.getTok().isNot(AsmToken::Integer)) {
2332  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2333  return MatchOperand_ParseFail;
2334  }
2335 
2336  int64_t ShiftAmount = Parser.getTok().getIntVal();
2337 
2338  if (ShiftAmount < 0) {
2339  Error(Parser.getTok().getLoc(), "positive shift amount required");
2340  return MatchOperand_ParseFail;
2341  }
2342  Parser.Lex(); // Eat the number
2343 
2344  SMLoc E = Parser.getTok().getLoc();
2345  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2346  S, E, getContext()));
2347  return MatchOperand_Success;
2348 }
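
 // A standalone sketch (illustrative names only) of the folding performed
 // above: an ADD/SUB immediate wider than 12 bits is still accepted when its
 // low 12 bits are zero, by re-expressing it as a 12-bit value with "lsl #12",
 // e.g. #0x5000 becomes #0x5, lsl #12.
 static bool splitAddSubImmExample(uint64_t Val, uint64_t &Imm12,
                                   unsigned &ShiftAmount) {
   if (Val <= 0xfff) {                                // Fits the field directly.
     Imm12 = Val;
     ShiftAmount = 0;
     return true;
   }
   if ((Val & 0xfff) == 0 && (Val >> 12) <= 0xfff) {  // Only a shifted form fits.
     Imm12 = Val >> 12;
     ShiftAmount = 12;
     return true;
   }
   return false;                  // Not encodable as an ADD/SUB immediate.
 }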
2349 
2350 /// parseCondCodeString - Parse a Condition Code string.
2351 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2352  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2353  .Case("eq", AArch64CC::EQ)
2354  .Case("ne", AArch64CC::NE)
2355  .Case("cs", AArch64CC::HS)
2356  .Case("hs", AArch64CC::HS)
2357  .Case("cc", AArch64CC::LO)
2358  .Case("lo", AArch64CC::LO)
2359  .Case("mi", AArch64CC::MI)
2360  .Case("pl", AArch64CC::PL)
2361  .Case("vs", AArch64CC::VS)
2362  .Case("vc", AArch64CC::VC)
2363  .Case("hi", AArch64CC::HI)
2364  .Case("ls", AArch64CC::LS)
2365  .Case("ge", AArch64CC::GE)
2366  .Case("lt", AArch64CC::LT)
2367  .Case("gt", AArch64CC::GT)
2368  .Case("le", AArch64CC::LE)
2369  .Case("al", AArch64CC::AL)
2370  .Case("nv", AArch64CC::NV)
2371  .Default(AArch64CC::Invalid);
2372  return CC;
2373 }
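
 // A standalone sketch of the aliasing in the table above (illustrative helper
 // only): "cs"/"hs" and "cc"/"lo" are alternative spellings of the same
 // condition, so both members of each pair map to a single condition code.
 static std::string canonicalCondNameExample(const std::string &Cond) {
   if (Cond == "cs") return "hs"; // Carry set == unsigned higher-or-same.
   if (Cond == "cc") return "lo"; // Carry clear == unsigned lower.
   return Cond;
 }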
2374 
2375 /// parseCondCode - Parse a Condition Code operand.
2376 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2377  bool invertCondCode) {
2378  MCAsmParser &Parser = getParser();
2379  SMLoc S = getLoc();
2380  const AsmToken &Tok = Parser.getTok();
2381  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2382 
2383  StringRef Cond = Tok.getString();
2384  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2385  if (CC == AArch64CC::Invalid)
2386  return TokError("invalid condition code");
2387  Parser.Lex(); // Eat identifier token.
2388 
2389  if (invertCondCode) {
2390  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2391  return TokError("condition codes AL and NV are invalid for this instruction");
2392  CC = AArch64CC::getInvertedCondCode(CC);
2393  }
2394 
2395  Operands.push_back(
2396  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2397  return false;
2398 }
2399 
2400 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
2401 /// argument. Parse it if present.
2402 OperandMatchResultTy
2403 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2404  MCAsmParser &Parser = getParser();
2405  const AsmToken &Tok = Parser.getTok();
2406  std::string LowerID = Tok.getString().lower();
2407  AArch64_AM::ShiftExtendType ShOp =
2408  StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2409  .Case("lsl", AArch64_AM::LSL)
2410  .Case("lsr", AArch64_AM::LSR)
2411  .Case("asr", AArch64_AM::ASR)
2412  .Case("ror", AArch64_AM::ROR)
2413  .Case("msl", AArch64_AM::MSL)
2414  .Case("uxtb", AArch64_AM::UXTB)
2415  .Case("uxth", AArch64_AM::UXTH)
2416  .Case("uxtw", AArch64_AM::UXTW)
2417  .Case("uxtx", AArch64_AM::UXTX)
2418  .Case("sxtb", AArch64_AM::SXTB)
2419  .Case("sxth", AArch64_AM::SXTH)
2420  .Case("sxtw", AArch64_AM::SXTW)
2421  .Case("sxtx", AArch64_AM::SXTX)
2422  .Default(AArch64_AM::InvalidShiftExtend);
2423 
2424  if (ShOp == AArch64_AM::InvalidShiftExtend)
2425  return MatchOperand_NoMatch;
2426 
2427  SMLoc S = Tok.getLoc();
2428  Parser.Lex();
2429 
2430  bool Hash = parseOptionalToken(AsmToken::Hash);
2431 
2432  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2433  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2434  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2435  ShOp == AArch64_AM::MSL) {
2436  // We expect a number here.
2437  TokError("expected #imm after shift specifier");
2438  return MatchOperand_ParseFail;
2439  }
2440 
2441  // "extend" type operations don't need an immediate, #0 is implicit.
2442  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2443  Operands.push_back(
2444  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2445  return MatchOperand_Success;
2446  }
2447 
2448  // Make sure we do actually have a number, identifier or a parenthesized
2449  // expression.
2450  SMLoc E = Parser.getTok().getLoc();
2451  if (!Parser.getTok().is(AsmToken::Integer) &&
2452  !Parser.getTok().is(AsmToken::LParen) &&
2453  !Parser.getTok().is(AsmToken::Identifier)) {
2454  Error(E, "expected integer shift amount");
2455  return MatchOperand_ParseFail;
2456  }
2457 
2458  const MCExpr *ImmVal;
2459  if (getParser().parseExpression(ImmVal))
2460  return MatchOperand_ParseFail;
2461 
2462  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2463  if (!MCE) {
2464  Error(E, "expected constant '#imm' after shift specifier");
2465  return MatchOperand_ParseFail;
2466  }
2467 
2468  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2469  Operands.push_back(AArch64Operand::CreateShiftExtend(
2470  ShOp, MCE->getValue(), true, S, E, getContext()));
2471  return MatchOperand_Success;
2472 }
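
 // A standalone sketch of the rule applied above (illustrative helper): plain
 // shifts always require an explicit amount ("lsl #3"), while extend operators
 // may omit it, in which case #0 is implied ("uxtw" behaves as "uxtw #0").
 static bool shiftRequiresAmountExample(const std::string &Op) {
   return Op == "lsl" || Op == "lsr" || Op == "asr" || Op == "ror" || Op == "msl";
 }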
2473 
2474 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2475  if (FBS[AArch64::HasV8_1aOps])
2476  Str += "ARMv8.1a";
2477  else if (FBS[AArch64::HasV8_2aOps])
2478  Str += "ARMv8.2a";
2479  else
2480  Str += "(unknown)";
2481 }
2482 
2483 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2484  SMLoc S) {
2485  const uint16_t Op2 = Encoding & 7;
2486  const uint16_t Cm = (Encoding & 0x78) >> 3;
2487  const uint16_t Cn = (Encoding & 0x780) >> 7;
2488  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2489 
2490  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2491 
2492  Operands.push_back(
2493  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2494  Operands.push_back(
2495  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2496  Operands.push_back(
2497  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2498  Expr = MCConstantExpr::create(Op2, getContext());
2499  Operands.push_back(
2500  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2501 }
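
 // The packed value decoded above is simply op1:CRn:CRm:op2 in 14 bits. For
 // reference, a standalone sketch of the reverse direction (illustrative
 // helper, not used by the parser):
 static uint16_t packSysAliasEncodingExample(unsigned Op1, unsigned Cn,
                                             unsigned Cm, unsigned Op2) {
   assert(Op1 <= 7 && Cn <= 15 && Cm <= 15 && Op2 <= 7 && "field out of range");
   return static_cast<uint16_t>((Op1 << 11) | (Cn << 7) | (Cm << 3) | Op2);
 }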
2502 
2503 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2504 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2505 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2506  OperandVector &Operands) {
2507  if (Name.find('.') != StringRef::npos)
2508  return TokError("invalid operand");
2509 
2510  Mnemonic = Name;
2511  Operands.push_back(
2512  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2513 
2514  MCAsmParser &Parser = getParser();
2515  const AsmToken &Tok = Parser.getTok();
2516  StringRef Op = Tok.getString();
2517  SMLoc S = Tok.getLoc();
2518 
2519  if (Mnemonic == "ic") {
2520  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2521  if (!IC)
2522  return TokError("invalid operand for IC instruction");
2523  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2524  std::string Str("IC " + std::string(IC->Name) + " requires ");
2525  setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2526  return TokError(Str.c_str());
2527  }
2528  createSysAlias(IC->Encoding, Operands, S);
2529  } else if (Mnemonic == "dc") {
2530  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2531  if (!DC)
2532  return TokError("invalid operand for DC instruction");
2533  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2534  std::string Str("DC " + std::string(DC->Name) + " requires ");
2535  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2536  return TokError(Str.c_str());
2537  }
2538  createSysAlias(DC->Encoding, Operands, S);
2539  } else if (Mnemonic == "at") {
2540  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2541  if (!AT)
2542  return TokError("invalid operand for AT instruction");
2543  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2544  std::string Str("AT " + std::string(AT->Name) + " requires ");
2545  setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2546  return TokError(Str.c_str());
2547  }
2548  createSysAlias(AT->Encoding, Operands, S);
2549  } else if (Mnemonic == "tlbi") {
2550  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2551  if (!TLBI)
2552  return TokError("invalid operand for TLBI instruction");
2553  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2554  std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2555  setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2556  return TokError(Str.c_str());
2557  }
2558  createSysAlias(TLBI->Encoding, Operands, S);
2559  }
2560 
2561  Parser.Lex(); // Eat operand.
2562 
2563  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2564  bool HasRegister = false;
2565 
2566  // Check for the optional register operand.
2567  if (parseOptionalToken(AsmToken::Comma)) {
2568  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2569  return TokError("expected register operand");
2570  HasRegister = true;
2571  }
2572 
2573  if (ExpectRegister && !HasRegister)
2574  return TokError("specified " + Mnemonic + " op requires a register");
2575  else if (!ExpectRegister && HasRegister)
2576  return TokError("specified " + Mnemonic + " op does not use a register");
2577 
2578  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2579  return true;
2580 
2581  return false;
2582 }
2583 
2584 OperandMatchResultTy
2585 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2586  MCAsmParser &Parser = getParser();
2587  const AsmToken &Tok = Parser.getTok();
2588 
2589  // Can be either a #imm style literal or an option name
2590  if (parseOptionalToken(AsmToken::Hash) ||
2591  Tok.is(AsmToken::Integer)) {
2592  // Immediate operand.
2593  const MCExpr *ImmVal;
2594  SMLoc ExprLoc = getLoc();
2595  if (getParser().parseExpression(ImmVal))
2596  return MatchOperand_ParseFail;
2597  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2598  if (!MCE) {
2599  Error(ExprLoc, "immediate value expected for barrier operand");
2600  return MatchOperand_ParseFail;
2601  }
2602  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2603  Error(ExprLoc, "barrier operand out of range");
2604  return MatchOperand_ParseFail;
2605  }
2606  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2607  Operands.push_back(AArch64Operand::CreateBarrier(
2608  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2609  return MatchOperand_Success;
2610  }
2611 
2612  if (Tok.isNot(AsmToken::Identifier)) {
2613  TokError("invalid operand for instruction");
2614  return MatchOperand_ParseFail;
2615  }
2616 
2617  // The only valid named option for ISB is 'sy'
2618  auto DB = AArch64DB::lookupDBByName(Tok.getString());
2619  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2620  TokError("'sy' or #imm operand expected");
2621  return MatchOperand_ParseFail;
2622  } else if (!DB) {
2623  TokError("invalid barrier option name");
2624  return MatchOperand_ParseFail;
2625  }
2626 
2627  Operands.push_back(AArch64Operand::CreateBarrier(
2628  DB->Encoding, Tok.getString(), getLoc(), getContext()));
2629  Parser.Lex(); // Consume the option
2630 
2631  return MatchOperand_Success;
2632 }
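
 // A standalone sketch of the two accepted forms above (illustrative helper;
 // encodings quoted from the DMB/DSB option table): a barrier operand is
 // either a named option or a raw 4-bit immediate, so "dsb sy" and "dsb #15"
 // should be equivalent. Unknown names return -1, mirroring the "invalid
 // barrier option name" path.
 static int lookupBarrierOptionExample(const std::string &Name) {
   if (Name == "sy")  return 15; // Full system barrier.
   if (Name == "ish") return 11; // Inner shareable.
   if (Name == "nsh") return 7;  // Non-shareable.
   if (Name == "osh") return 3;  // Outer shareable.
   return -1;
 }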
2633 
2634 OperandMatchResultTy
2635 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2636  MCAsmParser &Parser = getParser();
2637  const AsmToken &Tok = Parser.getTok();
2638 
2639  if (Tok.isNot(AsmToken::Identifier))
2640  return MatchOperand_NoMatch;
2641 
2642  int MRSReg, MSRReg;
2643  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2644  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2645  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2646  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2647  } else
2648  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2649 
2650  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2651  unsigned PStateImm = -1;
2652  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2653  PStateImm = PState->Encoding;
2654 
2655  Operands.push_back(
2656  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2657  PStateImm, getContext()));
2658  Parser.Lex(); // Eat identifier
2659 
2660  return MatchOperand_Success;
2661 }
2662 
2663 /// tryParseNeonVectorRegister - Parse a vector register operand.
2664 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2665  MCAsmParser &Parser = getParser();
2666  if (Parser.getTok().isNot(AsmToken::Identifier))
2667  return true;
2668 
2669  SMLoc S = getLoc();
2670  // Check for a vector register specifier first.
2671  StringRef Kind;
2672  int64_t Reg = tryMatchVectorRegister(Kind, false);
2673  if (Reg == -1)
2674  return true;
2675  Operands.push_back(
2676  AArch64Operand::CreateReg(Reg, RegKind::NeonVector, S, getLoc(),
2677  getContext()));
2678 
2679  // If there was an explicit qualifier, that goes on as a literal text
2680  // operand.
2681  if (!Kind.empty())
2682  Operands.push_back(
2683  AArch64Operand::CreateToken(Kind, false, S, getContext()));
2684 
2685  // If there is an index specifier following the register, parse that too.
2686  SMLoc SIdx = getLoc();
2687  if (parseOptionalToken(AsmToken::LBrac)) {
2688  const MCExpr *ImmVal;
2689  if (getParser().parseExpression(ImmVal))
2690  return false;
2691  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2692  if (!MCE) {
2693  TokError("immediate value expected for vector index");
2694  return false;
2695  }
2696 
2697  SMLoc E = getLoc();
2698 
2699  if (parseToken(AsmToken::RBrac, "']' expected"))
2700  return false;
2701 
2702  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2703  E, getContext()));
2704  }
2705 
2706  return false;
2707 }
2708 
2709 // tryParseSVERegister - Try to parse an SVE vector register name with an
2710 // optional kind specifier. If it is a register specifier, eat the token
2711 // and return it.
2712 OperandMatchResultTy
2713 AArch64AsmParser::tryParseSVERegister(int &Reg, StringRef &Kind,
2714  RegKind MatchKind) {
2715  MCAsmParser &Parser = getParser();
2716  const AsmToken &Tok = Parser.getTok();
2717 
2718  if (Tok.isNot(AsmToken::Identifier))
2719  return MatchOperand_NoMatch;
2720 
2721  StringRef Name = Tok.getString();
2722  // If there is a kind specifier, it's separated from the register name by
2723  // a '.'.
2724  size_t Start = 0, Next = Name.find('.');
2725  StringRef Head = Name.slice(Start, Next);
2726  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
2727 
2728  if (RegNum) {
2729  if (Next != StringRef::npos) {
2730  Kind = Name.slice(Next, StringRef::npos);
2731  if (!isValidSVEKind(Kind)) {
2732  TokError("invalid sve vector kind qualifier");
2733  return MatchOperand_ParseFail;
2734  }
2735  }
2736  Parser.Lex(); // Eat the register token.
2737 
2738  Reg = RegNum;
2739  return MatchOperand_Success;
2740  }
2741 
2742  return MatchOperand_NoMatch;
2743 }
2744 
2745 /// parseRegister - Parse a non-vector register operand.
2746 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2747  SMLoc S = getLoc();
2748  // Try for a vector (neon) register.
2749  if (!tryParseNeonVectorRegister(Operands))
2750  return false;
2751 
2752  // Try for a scalar register.
2753  int64_t Reg = tryParseRegister();
2754  if (Reg == -1)
2755  return true;
2756  Operands.push_back(AArch64Operand::CreateReg(Reg, RegKind::Scalar, S,
2757  getLoc(), getContext()));
2758 
2759  return false;
2760 }
2761 
2762 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2763  MCAsmParser &Parser = getParser();
2764  bool HasELFModifier = false;
2765  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
2766 
2767  if (parseOptionalToken(AsmToken::Colon)) {
2768  HasELFModifier = true;
2769 
2770  if (Parser.getTok().isNot(AsmToken::Identifier))
2771  return TokError("expect relocation specifier in operand after ':'");
2772 
2773  std::string LowerCase = Parser.getTok().getIdentifier().lower();
2774  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2775  .Case("lo12", AArch64MCExpr::VK_LO12)
2776  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2777  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2778  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2779  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2780  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2781  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2782  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2783  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2784  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2785  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2786  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2787  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2788  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2789  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2790  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2791  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2792  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2793  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2794  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2795  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2796  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2797  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2798  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2799  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2800  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2801  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2802  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2803  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2804  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2805  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2806  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2807  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2808  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2809  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2810  .Default(AArch64MCExpr::VK_INVALID);
2811 
2812  if (RefKind == AArch64MCExpr::VK_INVALID)
2813  return TokError("expect relocation specifier in operand after ':'");
2814 
2815  Parser.Lex(); // Eat identifier
2816 
2817  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
2818  return true;
2819  }
2820 
2821  if (getParser().parseExpression(ImmVal))
2822  return true;
2823 
2824  if (HasELFModifier)
2825  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2826 
2827  return false;
2828 }
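
 // A standalone sketch (illustrative helper using plain std::string) of the
 // ":specifier:expression" syntax handled above: an operand written as
 // ":lo12:foo" carries the relocation specifier "lo12" and the expression
 // "foo". Only the string splitting is shown; expression parsing is the
 // parser's job.
 static bool splitRelocSpecifierExample(const std::string &Operand,
                                        std::string &Specifier,
                                        std::string &Expression) {
   if (Operand.empty() || Operand[0] != ':')
     return false;                            // No relocation specifier present.
   size_t End = Operand.find(':', 1);
   if (End == std::string::npos)
     return false;                            // Unterminated specifier.
   Specifier = Operand.substr(1, End - 1);    // e.g. "lo12"
   Expression = Operand.substr(End + 1);      // e.g. "foo"
   return true;
 }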
2829 
2830 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2831 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2832  MCAsmParser &Parser = getParser();
2833  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Curly Bracket");
2834  SMLoc S = getLoc();
2835  Parser.Lex(); // Eat left bracket token.
2836  StringRef Kind;
2837  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2838  if (FirstReg == -1)
2839  return true;
2840  int64_t PrevReg = FirstReg;
2841  unsigned Count = 1;
2842 
2843  if (parseOptionalToken(AsmToken::Minus)) {
2844  SMLoc Loc = getLoc();
2845  StringRef NextKind;
2846  int64_t Reg = tryMatchVectorRegister(NextKind, true);
2847  if (Reg == -1)
2848  return true;
2849  // All registers in the list must have the same kind suffix.
2850  if (Kind != NextKind)
2851  return Error(Loc, "mismatched register size suffix");
2852 
2853  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2854 
2855  if (Space == 0 || Space > 3) {
2856  return Error(Loc, "invalid number of vectors");
2857  }
2858 
2859  Count += Space;
2860  }
2861  else {
2862  while (parseOptionalToken(AsmToken::Comma)) {
2863  SMLoc Loc = getLoc();
2864  StringRef NextKind;
2865  int64_t Reg = tryMatchVectorRegister(NextKind, true);
2866  if (Reg == -1)
2867  return true;
2868  // All registers in the list must have the same kind suffix.
2869  if (Kind != NextKind)
2870  return Error(Loc, "mismatched register size suffix");
2871 
2872  // Registers must be consecutive (with wraparound at 31).
2873  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2874  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2875  return Error(Loc, "registers must be sequential");
2876 
2877  PrevReg = Reg;
2878  ++Count;
2879  }
2880  }
2881 
2882  if (parseToken(AsmToken::RCurly, "'}' expected"))
2883  return true;
2884 
2885  if (Count > 4)
2886  return Error(S, "invalid number of vectors");
2887 
2888  unsigned NumElements = 0;
2889  char ElementKind = 0;
2890  if (!Kind.empty())
2891  parseValidVectorKind(Kind, NumElements, ElementKind);
2892 
2893  Operands.push_back(AArch64Operand::CreateVectorList(
2894  FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2895 
2896  // If there is an index specifier following the list, parse that too.
2897  SMLoc SIdx = getLoc();
2898  if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token.
2899  const MCExpr *ImmVal;
2900  if (getParser().parseExpression(ImmVal))
2901  return false;
2902  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2903  if (!MCE) {
2904  TokError("immediate value expected for vector index");
2905  return false;
2906  }
2907 
2908  SMLoc E = getLoc();
2909  if (parseToken(AsmToken::RBrac, "']' expected"))
2910  return false;
2911 
2912  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2913  E, getContext()));
2914  }
2915  return false;
2916 }
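
 // A standalone sketch of the sequencing rule enforced above (encodings stand
 // in for getEncodingValue(); the helper name is illustrative): registers in a
 // vector list must be consecutive, with the index wrapping from 31 back to 0,
 // so { v30.4s, v31.4s, v0.4s } is an acceptable list.
 static bool isSequentialVectorRegExample(unsigned PrevEncoding,
                                          unsigned NextEncoding) {
   return NextEncoding == (PrevEncoding + 1) % 32;
 }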
2917 
2918 OperandMatchResultTy
2919 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2920  MCAsmParser &Parser = getParser();
2921  const AsmToken &Tok = Parser.getTok();
2922  if (!Tok.is(AsmToken::Identifier))
2923  return MatchOperand_NoMatch;
2924 
2925  unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), RegKind::Scalar);
2926 
2927  MCContext &Ctx = getContext();
2928  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2929  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2930  return MatchOperand_NoMatch;
2931 
2932  SMLoc S = getLoc();
2933  Parser.Lex(); // Eat register
2934 
2935  if (!parseOptionalToken(AsmToken::Comma)) {
2936  Operands.push_back(
2937  AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
2938  return MatchOperand_Success;
2939  }
2940 
2941  parseOptionalToken(AsmToken::Hash);
2942 
2943  if (Parser.getTok().isNot(AsmToken::Integer)) {
2944  Error(getLoc(), "index must be absent or #0");
2945  return MatchOperand_ParseFail;
2946  }
2947 
2948  const MCExpr *ImmVal;
2949  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2950  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2951  Error(getLoc(), "index must be absent or #0");
2952  return MatchOperand_ParseFail;
2953  }
2954 
2955  Operands.push_back(
2956  AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
2957  return MatchOperand_Success;
2958 }
2959 
2960 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
2961 /// operand regardless of the mnemonic.
2962 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2963  bool invertCondCode) {
2964  MCAsmParser &Parser = getParser();
2965  // Check if the current operand has a custom associated parser, if so, try to
2966  // custom parse the operand, or fallback to the general approach.
2967  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2968  if (ResTy == MatchOperand_Success)
2969  return false;
2970  // If there wasn't a custom match, try the generic matcher below. Otherwise,
2971  // there was a match, but an error occurred, in which case, just return that
2972  // the operand parsing failed.
2973  if (ResTy == MatchOperand_ParseFail)
2974  return true;
2975 
2976  // Nothing custom, so do general case parsing.
2977  SMLoc S, E;
2978  switch (getLexer().getKind()) {
2979  default: {
2980  SMLoc S = getLoc();
2981  const MCExpr *Expr;
2982  if (parseSymbolicImmVal(Expr))
2983  return Error(S, "invalid operand");
2984 
2985  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2986  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2987  return false;
2988  }
2989  case AsmToken::LBrac: {
2990  SMLoc Loc = Parser.getTok().getLoc();
2991  Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2992  getContext()));
2993  Parser.Lex(); // Eat '['
2994 
2995  // There's no comma after a '[', so we can parse the next operand
2996  // immediately.
2997  return parseOperand(Operands, false, false);
2998  }
2999  case AsmToken::LCurly:
3000  return parseVectorList(Operands);
3001  case AsmToken::Identifier: {
3002  // If we're expecting a Condition Code operand, then just parse that.
3003  if (isCondCode)
3004  return parseCondCode(Operands, invertCondCode);
3005 
3006  // If it's a register name, parse it.
3007  if (!parseRegister(Operands))
3008  return false;
3009 
3010  // This could be an optional "shift" or "extend" operand.
3011  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3012  // We can only continue if no tokens were eaten.
3013  if (GotShift != MatchOperand_NoMatch)
3014  return GotShift;
3015 
3016  // This was not a register so parse other operands that start with an
3017  // identifier (like labels) as expressions and create them as immediates.
3018  const MCExpr *IdVal;
3019  S = getLoc();
3020  if (getParser().parseExpression(IdVal))
3021  return true;
3022  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3023  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3024  return false;
3025  }
3026  case AsmToken::Integer:
3027  case AsmToken::Real:
3028  case AsmToken::Hash: {
3029  // #42 -> immediate.
3030  S = getLoc();
3031 
3032  parseOptionalToken(AsmToken::Hash);
3033 
3034  // Parse a negative sign
3035  bool isNegative = false;
3036  if (Parser.getTok().is(AsmToken::Minus)) {
3037  isNegative = true;
3038  // We need to consume this token only when we have a Real, otherwise
3039  // we let parseSymbolicImmVal take care of it
3040  if (Parser.getLexer().peekTok().is(AsmToken::Real))
3041  Parser.Lex();
3042  }
3043 
3044  // The only Real that should come through here is a literal #0.0 for
3045  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3046  // so convert the value.
3047  const AsmToken &Tok = Parser.getTok();
3048  if (Tok.is(AsmToken::Real)) {
3049  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3050  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3051  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3052  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3053  Mnemonic != "fcmlt")
3054  return TokError("unexpected floating point literal");
3055  else if (IntVal != 0 || isNegative)
3056  return TokError("expected floating-point constant #0.0");
3057  Parser.Lex(); // Eat the token.
3058 
3059  Operands.push_back(
3060  AArch64Operand::CreateToken("#0", false, S, getContext()));
3061  Operands.push_back(
3062  AArch64Operand::CreateToken(".0", false, S, getContext()));
3063  return false;
3064  }
3065 
3066  const MCExpr *ImmVal;
3067  if (parseSymbolicImmVal(ImmVal))
3068  return true;
3069 
3070  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3071  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3072  return false;
3073  }
3074  case AsmToken::Equal: {
3075  SMLoc Loc = getLoc();
3076  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3077  return TokError("unexpected token in operand");
3078  Parser.Lex(); // Eat '='
3079  const MCExpr *SubExprVal;
3080  if (getParser().parseExpression(SubExprVal))
3081  return true;
3082 
3083  if (Operands.size() < 2 ||
3084  !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3085  return Error(Loc, "Only valid when first operand is register");
3086 
3087  bool IsXReg =
3088  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3089  Operands[1]->getReg());
3090 
3091  MCContext& Ctx = getContext();
3092  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3093  // If the op is an imm that fits into a mov, then replace the ldr with a mov.
3094  if (isa<MCConstantExpr>(SubExprVal)) {
3095  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3096  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3097  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3098  ShiftAmt += 16;
3099  Imm >>= 16;
3100  }
3101  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3102  Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3103  Operands.push_back(AArch64Operand::CreateImm(
3104  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3105  if (ShiftAmt)
3106  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3107  ShiftAmt, true, S, E, Ctx));
3108  return false;
3109  }
3110  APInt Simm = APInt(64, Imm << ShiftAmt);
3111  // check if the immediate is an unsigned or signed 32-bit int for W regs
3112  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3113  return Error(Loc, "Immediate too large for register");
3114  }
3115  // If it is a label or an imm that cannot fit in a movz, put it into CP.
3116  const MCExpr *CPLoc =
3117  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3118  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3119  return false;
3120  }
3121  }
3122 }
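
 // A standalone sketch (illustrative names; the parser's version above also
 // builds the operands) of the "ldr Xd, =imm" rewrite: trailing all-zero
 // 16-bit chunks are stripped so the value fits a 16-bit MOVZ immediate with a
 // shift of 0, 16, 32 or 48; anything else is placed in the constant pool.
 static bool fitsMovzExample(uint64_t Imm, bool Is64Bit, uint64_t &Imm16,
                             unsigned &ShiftAmt) {
   ShiftAmt = 0;
   const unsigned MaxShift = Is64Bit ? 48 : 16;
   while (Imm > 0xFFFF && (Imm & 0xFFFF) == 0 && ShiftAmt < MaxShift) {
     Imm >>= 16;        // Drop one all-zero 16-bit chunk.
     ShiftAmt += 16;
   }
   Imm16 = Imm;
   return Imm <= 0xFFFF; // e.g. 0x12340000 -> movz Xd, #0x1234, lsl #16
 }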
3123 
3124 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3125 /// operands.
3126 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3127  StringRef Name, SMLoc NameLoc,
3128  OperandVector &Operands) {
3129  MCAsmParser &Parser = getParser();
3130  Name = StringSwitch<StringRef>(Name.lower())
3131  .Case("beq", "b.eq")
3132  .Case("bne", "b.ne")
3133  .Case("bhs", "b.hs")
3134  .Case("bcs", "b.cs")
3135  .Case("blo", "b.lo")
3136  .Case("bcc", "b.cc")
3137  .Case("bmi", "b.mi")
3138  .Case("bpl", "b.pl")
3139  .Case("bvs", "b.vs")
3140  .Case("bvc", "b.vc")
3141  .Case("bhi", "b.hi")
3142  .Case("bls", "b.ls")
3143  .Case("bge", "b.ge")
3144  .Case("blt", "b.lt")
3145  .Case("bgt", "b.gt")
3146  .Case("ble", "b.le")
3147  .Case("bal", "b.al")
3148  .Case("bnv", "b.nv")
3149  .Default(Name);
3150 
3151  // First check for the AArch64-specific .req directive.
3152  if (Parser.getTok().is(AsmToken::Identifier) &&
3153  Parser.getTok().getIdentifier() == ".req") {
3154  parseDirectiveReq(Name, NameLoc);
3155  // We always return 'error' for this, as we're done with this
3156  // statement and don't need to match the 'instruction'.
3157  return true;
3158  }
3159 
3160  // Create the leading tokens for the mnemonic, split by '.' characters.
3161  size_t Start = 0, Next = Name.find('.');
3162  StringRef Head = Name.slice(Start, Next);
3163 
3164  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3165  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3166  return parseSysAlias(Head, NameLoc, Operands);
3167 
3168  Operands.push_back(
3169  AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3170  Mnemonic = Head;
3171 
3172  // Handle condition codes for a branch mnemonic
3173  if (Head == "b" && Next != StringRef::npos) {
3174  Start = Next;
3175  Next = Name.find('.', Start + 1);
3176  Head = Name.slice(Start + 1, Next);
3177 
3178  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3179  (Head.data() - Name.data()));
3180  AArch64CC::CondCode CC = parseCondCodeString(Head);
3181  if (CC == AArch64CC::Invalid)
3182  return Error(SuffixLoc, "invalid condition code");
3183  Operands.push_back(
3184  AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3185  Operands.push_back(
3186  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3187  }
3188 
3189  // Add the remaining tokens in the mnemonic.
3190  while (Next != StringRef::npos) {
3191  Start = Next;
3192  Next = Name.find('.', Start + 1);
3193  Head = Name.slice(Start, Next);
3194  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3195  (Head.data() - Name.data()) + 1);
3196  Operands.push_back(
3197  AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3198  }
3199 
3200  // Conditional compare instructions have a Condition Code operand, which needs
3201  // to be parsed and an immediate operand created.
3202  bool condCodeFourthOperand =
3203  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3204  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3205  Head == "csinc" || Head == "csinv" || Head == "csneg");
3206 
3207  // These instructions are aliases to some of the conditional select
3208  // instructions. However, the condition code is inverted in the aliased
3209  // instruction.
3210  //
3211  // FIXME: Is this the correct way to handle these? Or should the parser
3212  // generate the aliased instructions directly?
3213  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3214  bool condCodeThirdOperand =
3215  (Head == "cinc" || Head == "cinv" || Head == "cneg");
3216 
3217  // Read the remaining operands.
3218  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3219  // Read the first operand.
3220  if (parseOperand(Operands, false, false)) {
3221  return true;
3222  }
3223 
3224  unsigned N = 2;
3225  while (parseOptionalToken(AsmToken::Comma)) {
3226  // Parse and remember the operand.
3227  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3228  (N == 3 && condCodeThirdOperand) ||
3229  (N == 2 && condCodeSecondOperand),
3230  condCodeSecondOperand || condCodeThirdOperand)) {
3231  return true;
3232  }
3233 
3234  // After successfully parsing some operands there are two special cases to
3235  // consider (i.e. notional operands not separated by commas). Both are due
3236  // to memory specifiers:
3237  // + An RBrac will end an address for load/store/prefetch
3238  // + An '!' will indicate a pre-indexed operation.
3239  //
3240  // It's someone else's responsibility to make sure these tokens are sane
3241  // in the given context!
3242 
3243  SMLoc RLoc = Parser.getTok().getLoc();
3244  if (parseOptionalToken(AsmToken::RBrac))
3245  Operands.push_back(
3246  AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3247  SMLoc ELoc = Parser.getTok().getLoc();
3248  if (parseOptionalToken(AsmToken::Exclaim))
3249  Operands.push_back(
3250  AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3251 
3252  ++N;
3253  }
3254  }
3255 
3256  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3257  return true;
3258 
3259  return false;
3260 }
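
 // A standalone sketch of the mnemonic splitting above (illustrative helper):
 // a conditional branch such as "b.eq" is split at the '.' into the head "b"
 // and the condition "eq"; the legacy spellings like "beq" were already
 // rewritten to the dotted form by the StringSwitch at the top of the function.
 static void splitBranchMnemonicExample(const std::string &Name,
                                        std::string &Head, std::string &Cond) {
   size_t Dot = Name.find('.');
   Head = Name.substr(0, Dot);                                               // "b"
   Cond = (Dot == std::string::npos) ? std::string() : Name.substr(Dot + 1); // "eq"
 }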
3261 
3262 // FIXME: This entire function is a giant hack to provide us with decent
3263 // operand range validation/diagnostics until TableGen/MC can be extended
3264 // to support autogeneration of this kind of validation.
3265 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3266  SmallVectorImpl<SMLoc> &Loc) {
3267  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3268  // Check for indexed addressing modes w/ the base register being the
3269  // same as a destination/source register or pair load where
3270  // the Rt == Rt2. All of those are undefined behaviour.
3271  switch (Inst.getOpcode()) {
3272  case AArch64::LDPSWpre:
3273  case AArch64::LDPWpost:
3274  case AArch64::LDPWpre:
3275  case AArch64::LDPXpost:
3276  case AArch64::LDPXpre: {
3277  unsigned Rt = Inst.getOperand(1).getReg();
3278  unsigned Rt2 = Inst.getOperand(2).getReg();
3279  unsigned Rn = Inst.getOperand(3).getReg();
3280  if (RI->isSubRegisterEq(Rn, Rt))
3281  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3282  "is also a destination");
3283  if (RI->isSubRegisterEq(Rn, Rt2))
3284  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3285  "is also a destination");
3286  break;
3287  }
3288  case AArch64::LDPDi:
3289  case AArch64::LDPQi:
3290  case AArch64::LDPSi:
3291  case AArch64::LDPSWi:
3292  case AArch64::LDPWi:
3293  case AArch64::LDPXi: {
3294  unsigned Rt = Inst.getOperand(0).getReg();
3295  unsigned Rt2 = Inst.getOperand(1).getReg();
3296  if (Rt == Rt2)
3297  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3298  break;
3299  }
3300  case AArch64::LDPDpost:
3301  case AArch64::LDPDpre:
3302  case AArch64::LDPQpost:
3303  case AArch64::LDPQpre:
3304  case AArch64::LDPSpost:
3305  case AArch64::LDPSpre:
3306  case AArch64::LDPSWpost: {
3307  unsigned Rt = Inst.getOperand(1).getReg();
3308  unsigned Rt2 = Inst.getOperand(2).getReg();
3309  if (Rt == Rt2)
3310  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3311  break;
3312  }
3313  case AArch64::STPDpost:
3314  case AArch64::STPDpre:
3315  case AArch64::STPQpost:
3316  case AArch64::STPQpre:
3317  case AArch64::STPSpost:
3318  case AArch64::STPSpre:
3319  case AArch64::STPWpost:
3320  case AArch64::STPWpre:
3321  case AArch64::STPXpost:
3322  case AArch64::STPXpre: {
3323  unsigned Rt = Inst.getOperand(1).getReg();
3324  unsigned Rt2 = Inst.getOperand(2).getReg();
3325  unsigned Rn = Inst.getOperand(3).getReg();
3326  if (RI->isSubRegisterEq(Rn, Rt))
3327  return Error(Loc[0], "unpredictable STP instruction, writeback base "
3328  "is also a source");
3329  if (RI->isSubRegisterEq(Rn, Rt2))
3330  return Error(Loc[1], "unpredictable STP instruction, writeback base "
3331  "is also a source");
3332  break;
3333  }
3334  case AArch64::LDRBBpre:
3335  case AArch64::LDRBpre:
3336  case AArch64::LDRHHpre:
3337  case AArch64::LDRHpre:
3338  case AArch64::LDRSBWpre:
3339  case AArch64::LDRSBXpre:
3340  case AArch64::LDRSHWpre:
3341  case AArch64::LDRSHXpre:
3342  case AArch64::LDRSWpre:
3343  case AArch64::LDRWpre:
3344  case AArch64::LDRXpre:
3345  case AArch64::LDRBBpost:
3346  case AArch64::LDRBpost:
3347  case AArch64::LDRHHpost:
3348  case AArch64::LDRHpost:
3349  case AArch64::LDRSBWpost:
3350  case AArch64::LDRSBXpost:
3351  case AArch64::LDRSHWpost:
3352  case AArch64::LDRSHXpost:
3353  case AArch64::LDRSWpost:
3354  case AArch64::LDRWpost:
3355  case AArch64::LDRXpost: {
3356  unsigned Rt = Inst.getOperand(1).getReg();
3357  unsigned Rn = Inst.getOperand(2).getReg();
3358  if (RI->isSubRegisterEq(Rn, Rt))
3359  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3360  "is also a source");
3361  break;
3362  }
3363  case AArch64::STRBBpost:
3364  case AArch64::STRBpost:
3365  case AArch64::STRHHpost:
3366  case AArch64::STRHpost:
3367  case AArch64::STRWpost:
3368  case AArch64::STRXpost:
3369  case AArch64::STRBBpre:
3370  case AArch64::STRBpre:
3371  case AArch64::STRHHpre:
3372  case AArch64::STRHpre:
3373  case AArch64::STRWpre:
3374  case AArch64::STRXpre: {
3375  unsigned Rt = Inst.getOperand(1).getReg();
3376  unsigned Rn = Inst.getOperand(2).getReg();
3377  if (RI->isSubRegisterEq(Rn, Rt))
3378  return Error(Loc[0], "unpredictable STR instruction, writeback base "
3379  "is also a source");
3380  break;
3381  }
3382  }
3383 
3384  // Now check immediate ranges. Separate from the above as there is overlap
3385  // in the instructions being checked and this keeps the nested conditionals
3386  // to a minimum.
3387  switch (Inst.getOpcode()) {
3388  case AArch64::ADDSWri:
3389  case AArch64::ADDSXri:
3390  case AArch64::ADDWri:
3391  case AArch64::ADDXri:
3392  case AArch64::SUBSWri:
3393  case AArch64::SUBSXri:
3394  case AArch64::SUBWri:
3395  case AArch64::SUBXri: {
3396  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3397  // some slight duplication here.
3398  if (Inst.getOperand(2).isExpr()) {
3399  const MCExpr *Expr = Inst.getOperand(2).getExpr();
3400  AArch64MCExpr::VariantKind ELFRefKind;
3401  MCSymbolRefExpr::VariantKind DarwinRefKind;
3402  int64_t Addend;
3403  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3404 
3405  // Only allow these with ADDXri.
3406  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3407  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3408  Inst.getOpcode() == AArch64::ADDXri)
3409  return false;
3410 
3411  // Only allow these with ADDXri/ADDWri
3412  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3413  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3414  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3415  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3416  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3417  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3418  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3419  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3420  (Inst.getOpcode() == AArch64::ADDXri ||
3421  Inst.getOpcode() == AArch64::ADDWri))
3422  return false;
3423 
3424  // Don't allow symbol refs in the immediate field otherwise
3425  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3426  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3427  // 'cmp w0, borked')
3428  return Error(Loc.back(), "invalid immediate expression");
3429  }
3430  // We don't validate more complex expressions here
3431  }
3432  return false;
3433  }
3434  default:
3435  return false;
3436  }
3437 }
3438 
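The checks above encode the cases the diagnostics call "unpredictable": a writeback base register must not overlap a transfer register, and a load pair must name two distinct destinations. A minimal standalone sketch of the same two rules, using plain register indices rather than LLVM's MCRegisterInfo (the helper names are illustrative, not LLVM API):

#include <cstdio>

// Registers are modelled as plain indices; sub-register overlap (e.g. w0
// inside x0) is ignored in this sketch.
static bool unpredictableWriteback(unsigned Rt, unsigned Rt2, unsigned Rn) {
  return Rn == Rt || Rn == Rt2;   // base is also a destination/source
}
static bool unpredictablePair(unsigned Rt, unsigned Rt2) {
  return Rt == Rt2;               // Rt2 == Rt
}

int main() {
  std::printf("%d\n", unpredictableWriteback(0, 1, 0)); // ldp x0, x1, [x0], #16 -> 1
  std::printf("%d\n", unpredictablePair(2, 2));         // ldp x2, x2, [x3]      -> 1
  return 0;
}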
3439 static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
3440  unsigned VariantID = 0);
3441 
3442 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
3443  OperandVector &Operands) {
3444  switch (ErrCode) {
3445  case Match_MissingFeature:
3446  return Error(Loc,
3447  "instruction requires a CPU feature not currently enabled");
3448  case Match_InvalidOperand:
3449  return Error(Loc, "invalid operand for instruction");
3450  case Match_InvalidSuffix:
3451  return Error(Loc, "invalid type suffix for instruction");
3452  case Match_InvalidCondCode:
3453  return Error(Loc, "expected AArch64 condition code");
3454  case Match_AddSubRegExtendSmall:
3455  return Error(Loc,
3456  "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3457  case Match_AddSubRegExtendLarge:
3458  return Error(Loc,
3459  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3460  case Match_AddSubSecondSource:
3461  return Error(Loc,
3462  "expected compatible register, symbol or integer in range [0, 4095]");
3463  case Match_LogicalSecondSource:
3464  return Error(Loc, "expected compatible register or logical immediate");
3465  case Match_InvalidMovImm32Shift:
3466  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3467  case Match_InvalidMovImm64Shift:
3468  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3469  case Match_AddSubRegShift32:
3470  return Error(Loc,
3471  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3472  case Match_AddSubRegShift64:
3473  return Error(Loc,
3474  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3475  case Match_InvalidFPImm:
3476  return Error(Loc,
3477  "expected compatible register or floating-point constant");
3478  case Match_InvalidMemoryIndexedSImm9:
3479  return Error(Loc, "index must be an integer in range [-256, 255].");
3480  case Match_InvalidMemoryIndexedSImm10:
3481  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
3482  case Match_InvalidMemoryIndexed4SImm7:
3483  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3484  case Match_InvalidMemoryIndexed8SImm7:
3485  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3486  case Match_InvalidMemoryIndexed16SImm7:
3487  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3488  case Match_InvalidMemoryWExtend8:
3489  return Error(Loc,
3490  "expected 'uxtw' or 'sxtw' with optional shift of #0");
3491  case Match_InvalidMemoryWExtend16:
3492  return Error(Loc,
3493  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3494  case Match_InvalidMemoryWExtend32:
3495  return Error(Loc,
3496  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3497  case Match_InvalidMemoryWExtend64:
3498  return Error(Loc,
3499  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3500  case Match_InvalidMemoryWExtend128:
3501  return Error(Loc,
3502  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3503  case Match_InvalidMemoryXExtend8:
3504  return Error(Loc,
3505  "expected 'lsl' or 'sxtx' with optional shift of #0");
3506  case Match_InvalidMemoryXExtend16:
3507  return Error(Loc,
3508  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3509  case Match_InvalidMemoryXExtend32:
3510  return Error(Loc,
3511  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3512  case Match_InvalidMemoryXExtend64:
3513  return Error(Loc,
3514  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3515  case Match_InvalidMemoryXExtend128:
3516  return Error(Loc,
3517  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3518  case Match_InvalidMemoryIndexed1:
3519  return Error(Loc, "index must be an integer in range [0, 4095].");
3520  case Match_InvalidMemoryIndexed2:
3521  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3522  case Match_InvalidMemoryIndexed4:
3523  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3524  case Match_InvalidMemoryIndexed8:
3525  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3526  case Match_InvalidMemoryIndexed16:
3527  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3528  case Match_InvalidImm0_1:
3529  return Error(Loc, "immediate must be an integer in range [0, 1].");
3530  case Match_InvalidImm0_7:
3531  return Error(Loc, "immediate must be an integer in range [0, 7].");
3532  case Match_InvalidImm0_15:
3533  return Error(Loc, "immediate must be an integer in range [0, 15].");
3534  case Match_InvalidImm0_31:
3535  return Error(Loc, "immediate must be an integer in range [0, 31].");
3536  case Match_InvalidImm0_63:
3537  return Error(Loc, "immediate must be an integer in range [0, 63].");
3538  case Match_InvalidImm0_127:
3539  return Error(Loc, "immediate must be an integer in range [0, 127].");
3540  case Match_InvalidImm0_255:
3541  return Error(Loc, "immediate must be an integer in range [0, 255].");
3542  case Match_InvalidImm0_65535:
3543  return Error(Loc, "immediate must be an integer in range [0, 65535].");
3544  case Match_InvalidImm1_8:
3545  return Error(Loc, "immediate must be an integer in range [1, 8].");
3546  case Match_InvalidImm1_16:
3547  return Error(Loc, "immediate must be an integer in range [1, 16].");
3548  case Match_InvalidImm1_32:
3549  return Error(Loc, "immediate must be an integer in range [1, 32].");
3550  case Match_InvalidImm1_64:
3551  return Error(Loc, "immediate must be an integer in range [1, 64].");
3552  case Match_InvalidIndex1:
3553  return Error(Loc, "expected lane specifier '[1]'");
3554  case Match_InvalidIndexB:
3555  return Error(Loc, "vector lane must be an integer in range [0, 15].");
3556  case Match_InvalidIndexH:
3557  return Error(Loc, "vector lane must be an integer in range [0, 7].");
3558  case Match_InvalidIndexS:
3559  return Error(Loc, "vector lane must be an integer in range [0, 3].");
3560  case Match_InvalidIndexD:
3561  return Error(Loc, "vector lane must be an integer in range [0, 1].");
3562  case Match_InvalidLabel:
3563  return Error(Loc, "expected label or encodable integer pc offset");
3564  case Match_MRS:
3565  return Error(Loc, "expected readable system register");
3566  case Match_MSR:
3567  return Error(Loc, "expected writable system register or pstate");
3568  case Match_InvalidComplexRotationEven:
3569  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
3570  case Match_InvalidComplexRotationOdd:
3571  return Error(Loc, "complex rotation must be 90 or 270.");
3572  case Match_MnemonicFail: {
3573  std::string Suggestion = AArch64MnemonicSpellCheck(
3574  ((AArch64Operand &)*Operands[0]).getToken(),
3575  ComputeAvailableFeatures(STI->getFeatureBits()));
3576  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
3577  }
3578  default:
3579  llvm_unreachable("unexpected error code!");
3580  }
3581 }
3582 
3583 static const char *getSubtargetFeatureName(uint64_t Val);
3584 
3585 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3586  OperandVector &Operands,
3587  MCStreamer &Out,
3588  uint64_t &ErrorInfo,
3589  bool MatchingInlineAsm) {
3590  assert(!Operands.empty() && "Unexpected empty operand list!");
3591  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3592  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3593 
3594  StringRef Tok = Op.getToken();
3595  unsigned NumOperands = Operands.size();
3596 
3597  if (NumOperands == 4 && Tok == "lsl") {
3598  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3599  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3600  if (Op2.isReg() && Op3.isImm()) {
3601  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3602  if (Op3CE) {
3603  uint64_t Op3Val = Op3CE->getValue();
3604  uint64_t NewOp3Val = 0;
3605  uint64_t NewOp4Val = 0;
3606  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3607  Op2.getReg())) {
3608  NewOp3Val = (32 - Op3Val) & 0x1f;
3609  NewOp4Val = 31 - Op3Val;
3610  } else {
3611  NewOp3Val = (64 - Op3Val) & 0x3f;
3612  NewOp4Val = 63 - Op3Val;
3613  }
3614 
3615  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3616  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3617 
3618  Operands[0] = AArch64Operand::CreateToken(
3619  "ubfm", false, Op.getStartLoc(), getContext());
3620  Operands.push_back(AArch64Operand::CreateImm(
3621  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3622  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3623  Op3.getEndLoc(), getContext());
3624  }
3625  }
3626  } else if (NumOperands == 4 && Tok == "bfc") {
3627  // FIXME: Horrible hack to handle BFC->BFM alias.
3628  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3629  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3630  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3631 
3632  if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3633  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3634  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3635 
3636  if (LSBCE && WidthCE) {
3637  uint64_t LSB = LSBCE->getValue();
3638  uint64_t Width = WidthCE->getValue();
3639 
3640  uint64_t RegWidth = 0;
3641  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3642  Op1.getReg()))
3643  RegWidth = 64;
3644  else
3645  RegWidth = 32;
3646 
3647  if (LSB >= RegWidth)
3648  return Error(LSBOp.getStartLoc(),
3649  "expected integer in range [0, 31]");
3650  if (Width < 1 || Width > RegWidth)
3651  return Error(WidthOp.getStartLoc(),
3652  "expected integer in range [1, 32]");
3653 
3654  uint64_t ImmR = 0;
3655  if (RegWidth == 32)
3656  ImmR = (32 - LSB) & 0x1f;
3657  else
3658  ImmR = (64 - LSB) & 0x3f;
3659 
3660  uint64_t ImmS = Width - 1;
3661 
3662  if (ImmR != 0 && ImmS >= ImmR)
3663  return Error(WidthOp.getStartLoc(),
3664  "requested insert overflows register");
3665 
3666  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3667  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3668  Operands[0] = AArch64Operand::CreateToken(
3669  "bfm", false, Op.getStartLoc(), getContext());
3670  Operands[2] = AArch64Operand::CreateReg(
3671  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
3672  SMLoc(), SMLoc(), getContext());
3673  Operands[3] = AArch64Operand::CreateImm(
3674  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3675  Operands.emplace_back(
3676  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3677  WidthOp.getEndLoc(), getContext()));
3678  }
3679  }
3680  } else if (NumOperands == 5) {
3681  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3682  // UBFIZ -> UBFM aliases.
3683  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3684  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3685  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3686  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3687 
3688  if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3689  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3690  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3691 
3692  if (Op3CE && Op4CE) {
3693  uint64_t Op3Val = Op3CE->getValue();
3694  uint64_t Op4Val = Op4CE->getValue();
3695 
3696  uint64_t RegWidth = 0;
3697  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3698  Op1.getReg()))
3699  RegWidth = 64;
3700  else
3701  RegWidth = 32;
3702 
3703  if (Op3Val >= RegWidth)
3704  return Error(Op3.getStartLoc(),
3705  "expected integer in range [0, 31]");
3706  if (Op4Val < 1 || Op4Val > RegWidth)
3707  return Error(Op4.getStartLoc(),
3708  "expected integer in range [1, 32]");
3709 
3710  uint64_t NewOp3Val = 0;
3711  if (RegWidth == 32)
3712  NewOp3Val = (32 - Op3Val) & 0x1f;
3713  else
3714  NewOp3Val = (64 - Op3Val) & 0x3f;
3715 
3716  uint64_t NewOp4Val = Op4Val - 1;
3717 
3718  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3719  return Error(Op4.getStartLoc(),
3720  "requested insert overflows register");
3721 
3722  const MCExpr *NewOp3 =
3723  MCConstantExpr::create(NewOp3Val, getContext());
3724  const MCExpr *NewOp4 =
3725  MCConstantExpr::create(NewOp4Val, getContext());
3726  Operands[3] = AArch64Operand::CreateImm(
3727  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3728  Operands[4] = AArch64Operand::CreateImm(
3729  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3730  if (Tok == "bfi")
3731  Operands[0] = AArch64Operand::CreateToken(
3732  "bfm", false, Op.getStartLoc(), getContext());
3733  else if (Tok == "sbfiz")
3734  Operands[0] = AArch64Operand::CreateToken(
3735  "sbfm", false, Op.getStartLoc(), getContext());
3736  else if (Tok == "ubfiz")
3737  Operands[0] = AArch64Operand::CreateToken(
3738  "ubfm", false, Op.getStartLoc(), getContext());
3739  else
3740  llvm_unreachable("No valid mnemonic for alias?");
3741  }
3742  }
3743 
3744  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3745  // UBFX -> UBFM aliases.
3746  } else if (NumOperands == 5 &&
3747  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3748  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3749  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3750  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3751 
3752  if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3753  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3754  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3755 
3756  if (Op3CE && Op4CE) {
3757  uint64_t Op3Val = Op3CE->getValue();
3758  uint64_t Op4Val = Op4CE->getValue();
3759 
3760  uint64_t RegWidth = 0;
3761  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3762  Op1.getReg()))
3763  RegWidth = 64;
3764  else
3765  RegWidth = 32;
3766 
3767  if (Op3Val >= RegWidth)
3768  return Error(Op3.getStartLoc(),
3769  "expected integer in range [0, 31]");
3770  if (Op4Val < 1 || Op4Val > RegWidth)
3771  return Error(Op4.getStartLoc(),
3772  "expected integer in range [1, 32]");
3773 
3774  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3775 
3776  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3777  return Error(Op4.getStartLoc(),
3778  "requested extract overflows register");
3779 
3780  const MCExpr *NewOp4 =
3781  MCConstantExpr::create(NewOp4Val, getContext());
3782  Operands[4] = AArch64Operand::CreateImm(
3783  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3784  if (Tok == "bfxil")
3785  Operands[0] = AArch64Operand::CreateToken(
3786  "bfm", false, Op.getStartLoc(), getContext());
3787  else if (Tok == "sbfx")
3788  Operands[0] = AArch64Operand::CreateToken(
3789  "sbfm", false, Op.getStartLoc(), getContext());
3790  else if (Tok == "ubfx")
3791  Operands[0] = AArch64Operand::CreateToken(
3792  "ubfm", false, Op.getStartLoc(), getContext());
3793  else
3794  llvm_unreachable("No valid mnemonic for alias?");
3795  }
3796  }
3797  }
3798  }
3799  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3800  // InstAlias can't quite handle this since the reg classes aren't
3801  // subclasses.
3802  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3803  // The source register can be Wn here, but the matcher expects a
3804  // GPR64. Twiddle it here if necessary.
3805  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3806  if (Op.isReg()) {
3807  unsigned Reg = getXRegFromWReg(Op.getReg());
3808  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3809  Op.getStartLoc(), Op.getEndLoc(),
3810  getContext());
3811  }
3812  }
3813  // FIXME: Likewise for sxt[bh] with a Xd dst operand
3814  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3815  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3816  if (Op.isReg() &&
3817  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3818  Op.getReg())) {
3819  // The source register can be Wn here, but the matcher expects a
3820  // GPR64. Twiddle it here if necessary.
3821  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3822  if (Op.isReg()) {
3823  unsigned Reg = getXRegFromWReg(Op.getReg());
3824  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3825  Op.getStartLoc(),
3826  Op.getEndLoc(), getContext());
3827  }
3828  }
3829  }
3830  // FIXME: Likewise for uxt[bh] with a Xd dst operand
3831  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3832  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3833  if (Op.isReg() &&
3834  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3835  Op.getReg())) {
3836  // The source register can be Wn here, but the matcher expects a
3837  // GPR32. Twiddle it here if necessary.
3838  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3839  if (Op.isReg()) {
3840  unsigned Reg = getWRegFromXReg(Op.getReg());
3841  Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3842  Op.getStartLoc(),
3843  Op.getEndLoc(), getContext());
3844  }
3845  }
3846  }
3847 
3848  MCInst Inst;
3849  // First try to match against the secondary set of tables containing the
3850  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3851  unsigned MatchResult =
3852  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3853 
3854  // If that fails, try against the alternate table containing long-form NEON:
3855  // "fadd v0.2s, v1.2s, v2.2s"
3856  if (MatchResult != Match_Success) {
3857  // But first, save the short-form match result: we can use it in case the
3858  // long-form match also fails.
3859  auto ShortFormNEONErrorInfo = ErrorInfo;
3860  auto ShortFormNEONMatchResult = MatchResult;
3861 
3862  MatchResult =
3863  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3864 
3865  // Now, both matches failed, and the long-form match failed on the mnemonic
3866  // suffix token operand. The short-form match failure is probably more
3867  // relevant: use it instead.
3868  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3869  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3870  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3871  MatchResult = ShortFormNEONMatchResult;
3872  ErrorInfo = ShortFormNEONErrorInfo;
3873  }
3874  }
3875 
3876  switch (MatchResult) {
3877  case Match_Success: {
3878  // Perform range checking and other semantic validations
3879  SmallVector<SMLoc, 8> OperandLocs;
3880  NumOperands = Operands.size();
3881  for (unsigned i = 1; i < NumOperands; ++i)
3882  OperandLocs.push_back(Operands[i]->getStartLoc());
3883  if (validateInstruction(Inst, OperandLocs))
3884  return true;
3885 
3886  Inst.setLoc(IDLoc);
3887  Out.EmitInstruction(Inst, getSTI());
3888  return false;
3889  }
3890  case Match_MissingFeature: {
3891  assert(ErrorInfo && "Unknown missing feature!");
3892  // Special case the error message for the very common case where only
3893  // a single subtarget feature is missing (neon, e.g.).
3894  std::string Msg = "instruction requires:";
3895  uint64_t Mask = 1;
3896  for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3897  if (ErrorInfo & Mask) {
3898  Msg += " ";
3899  Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3900  }
3901  Mask <<= 1;
3902  }
3903  return Error(IDLoc, Msg);
3904  }
3905  case Match_MnemonicFail:
3906  return showMatchError(IDLoc, MatchResult, Operands);
3907  case Match_InvalidOperand: {
3908  SMLoc ErrorLoc = IDLoc;
3909 
3910  if (ErrorInfo != ~0ULL) {
3911  if (ErrorInfo >= Operands.size())
3912  return Error(IDLoc, "too few operands for instruction",
3913  SMRange(IDLoc, getTok().getLoc()));
3914 
3915  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3916  if (ErrorLoc == SMLoc())
3917  ErrorLoc = IDLoc;
3918  }
3919  // If the match failed on a suffix token operand, tweak the diagnostic
3920  // accordingly.
3921  if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3922  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3923  MatchResult = Match_InvalidSuffix;
3924 
3925  return showMatchError(ErrorLoc, MatchResult, Operands);
3926  }
3927  case Match_InvalidMemoryIndexed1:
3928  case Match_InvalidMemoryIndexed2:
3929  case Match_InvalidMemoryIndexed4:
3930  case Match_InvalidMemoryIndexed8:
3931  case Match_InvalidMemoryIndexed16:
3932  case Match_InvalidCondCode:
3933  case Match_AddSubRegExtendSmall:
3934  case Match_AddSubRegExtendLarge:
3935  case Match_AddSubSecondSource:
3936  case Match_LogicalSecondSource:
3937  case Match_AddSubRegShift32:
3938  case Match_AddSubRegShift64:
3939  case Match_InvalidMovImm32Shift:
3940  case Match_InvalidMovImm64Shift:
3941  case Match_InvalidFPImm:
3942  case Match_InvalidMemoryWExtend8:
3943  case Match_InvalidMemoryWExtend16:
3944  case Match_InvalidMemoryWExtend32:
3945  case Match_InvalidMemoryWExtend64:
3946  case Match_InvalidMemoryWExtend128:
3947  case Match_InvalidMemoryXExtend8:
3948  case Match_InvalidMemoryXExtend16:
3949  case Match_InvalidMemoryXExtend32:
3950  case Match_InvalidMemoryXExtend64:
3951  case Match_InvalidMemoryXExtend128:
3952  case Match_InvalidMemoryIndexed4SImm7:
3953  case Match_InvalidMemoryIndexed8SImm7:
3954  case Match_InvalidMemoryIndexed16SImm7:
3955  case Match_InvalidMemoryIndexedSImm9:
3956  case Match_InvalidMemoryIndexedSImm10:
3957  case Match_InvalidImm0_1:
3958  case Match_InvalidImm0_7:
3959  case Match_InvalidImm0_15:
3960  case Match_InvalidImm0_31:
3961  case Match_InvalidImm0_63:
3962  case Match_InvalidImm0_127:
3963  case Match_InvalidImm0_255:
3964  case Match_InvalidImm0_65535:
3965  case Match_InvalidImm1_8:
3966  case Match_InvalidImm1_16:
3967  case Match_InvalidImm1_32:
3968  case Match_InvalidImm1_64:
3969  case Match_InvalidIndex1:
3970  case Match_InvalidIndexB:
3971  case Match_InvalidIndexH:
3972  case Match_InvalidIndexS:
3973  case Match_InvalidIndexD:
3974  case Match_InvalidLabel:
3975  case Match_InvalidComplexRotationEven:
3976  case Match_InvalidComplexRotationOdd:
3977  case Match_MSR:
3978  case Match_MRS: {
3979  if (ErrorInfo >= Operands.size())
3980  return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
3981  // Any time we get here, there's nothing fancy to do. Just get the
3982  // operand SMLoc and display the diagnostic.
3983  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3984  if (ErrorLoc == SMLoc())
3985  ErrorLoc = IDLoc;
3986  return showMatchError(ErrorLoc, MatchResult, Operands);
3987  }
3988  }
3989 
3990  llvm_unreachable("Implement any new match types added!");
3991 }
3992 
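Most of the alias rewrites above boil down to computing an (immr, imms) pair from the register width and the shift/lsb/width operands. A self-contained sketch of two of those computations, with function names of my own choosing (not LLVM's):

#include <cassert>
#include <cstdint>
#include <utility>

// lsl Rd, Rn, #shift  ==>  ubfm Rd, Rn, #immr, #imms
static std::pair<uint64_t, uint64_t> lslToUbfm(uint64_t Shift, unsigned RegWidth) {
  assert(Shift < RegWidth);
  return {(RegWidth - Shift) & (RegWidth - 1), RegWidth - 1 - Shift};
}

// bfi Rd, Rn, #lsb, #width  ==>  bfm Rd, Rn, #immr, #imms
static std::pair<uint64_t, uint64_t> bfiToBfm(uint64_t LSB, uint64_t Width,
                                              unsigned RegWidth) {
  assert(LSB < RegWidth && Width >= 1 && Width <= RegWidth);
  return {(RegWidth - LSB) & (RegWidth - 1), Width - 1};
}

int main() {
  auto [R1, S1] = lslToUbfm(4, 32);    // lsl w0, w1, #4      ->  ubfm w0, w1, #28, #27
  assert(R1 == 28 && S1 == 27);
  auto [R2, S2] = bfiToBfm(8, 16, 64); // bfi x0, x1, #8, #16 ->  bfm x0, x1, #56, #15
  assert(R2 == 56 && S2 == 15);
  return 0;
}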
3993 /// ParseDirective parses the AArch64-specific directives
3994 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3995  const MCObjectFileInfo::Environment Format =
3996  getContext().getObjectFileInfo()->getObjectFileType();
3997  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3998  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3999 
4000  StringRef IDVal = DirectiveID.getIdentifier();
4001  SMLoc Loc = DirectiveID.getLoc();
4002  if (IDVal == ".arch")
4003  parseDirectiveArch(Loc);
4004  else if (IDVal == ".cpu")
4005  parseDirectiveCPU(Loc);
4006  else if (IDVal == ".hword")
4007  parseDirectiveWord(2, Loc);
4008  else if (IDVal == ".word")
4009  parseDirectiveWord(4, Loc);
4010  else if (IDVal == ".xword")
4011  parseDirectiveWord(8, Loc);
4012  else if (IDVal == ".tlsdesccall")
4013  parseDirectiveTLSDescCall(Loc);
4014  else if (IDVal == ".ltorg" || IDVal == ".pool")
4015  parseDirectiveLtorg(Loc);
4016  else if (IDVal == ".unreq")
4017  parseDirectiveUnreq(Loc);
4018  else if (!IsMachO && !IsCOFF) {
4019  if (IDVal == ".inst")
4020  parseDirectiveInst(Loc);
4021  else
4022  return true;
4023  } else if (IDVal == MCLOHDirectiveName())
4024  parseDirectiveLOH(IDVal, Loc);
4025  else
4026  return true;
4027  return false;
4028 }
4029 
4030 static const struct {
4031  const char *Name;
4032  const FeatureBitset Features;
4033 } ExtensionMap[] = {
4034  { "crc", {AArch64::FeatureCRC} },
4035  { "crypto", {AArch64::FeatureCrypto} },
4036  { "fp", {AArch64::FeatureFPARMv8} },
4037  { "simd", {AArch64::FeatureNEON} },
4038  { "ras", {AArch64::FeatureRAS} },
4039  { "lse", {AArch64::FeatureLSE} },
4040 
4041  // FIXME: Unsupported extensions
4042  { "pan", {} },
4043  { "lor", {} },
4044  { "rdma", {} },
4045  { "profile", {} },
4046 };
4047 
4048 /// parseDirectiveArch
4049 /// ::= .arch token
4050 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4051  SMLoc ArchLoc = getLoc();
4052 
4053  StringRef Arch, ExtensionString;
4054  std::tie(Arch, ExtensionString) =
4055  getParser().parseStringToEndOfStatement().trim().split('+');
4056 
4057  AArch64::ArchKind ID = AArch64::parseArch(Arch);
4058  if (ID == AArch64::ArchKind::INVALID)
4059  return Error(ArchLoc, "unknown arch name");
4060 
4061  if (parseToken(AsmToken::EndOfStatement))
4062  return true;
4063 
4064  // Get the architecture and extension features.
4065  std::vector<StringRef> AArch64Features;
4066  AArch64::getArchFeatures(ID, AArch64Features);
4068  AArch64Features);
4069 
4070  MCSubtargetInfo &STI = copySTI();
4071  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4072  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4073 
4074  SmallVector<StringRef, 4> RequestedExtensions;
4075  if (!ExtensionString.empty())
4076  ExtensionString.split(RequestedExtensions, '+');
4077 
4078  FeatureBitset Features = STI.getFeatureBits();
4079  for (auto Name : RequestedExtensions) {
4080  bool EnableFeature = true;
4081 
4082  if (Name.startswith_lower("no")) {
4083  EnableFeature = false;
4084  Name = Name.substr(2);
4085  }
4086 
4087  for (const auto &Extension : ExtensionMap) {
4088  if (Extension.Name != Name)
4089  continue;
4090 
4091  if (Extension.Features.none())
4092  report_fatal_error("unsupported architectural extension: " + Name);
4093 
4094  FeatureBitset ToggleFeatures = EnableFeature
4095  ? (~Features & Extension.Features)
4096  : ( Features & Extension.Features);
4097  uint64_t Features =
4098  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4099  setAvailableFeatures(Features);
4100  break;
4101  }
4102  }
4103  return false;
4104 }
4105 
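The feature-toggle expression above is easy to misread: it computes only the extension bits that are not already in the requested state, so that ToggleFeature flips exactly those and the extension ends up enabled (or disabled) regardless of its starting value. A sketch of that idea with std::bitset standing in for FeatureBitset:

#include <bitset>
#include <cassert>

using Bits = std::bitset<8>; // stand-in for llvm::FeatureBitset

static Bits bitsToToggle(Bits Current, Bits Extension, bool Enable) {
  return Enable ? (~Current & Extension)  // bits that are off but should be on
                : ( Current & Extension); // bits that are on but should be off
}

int main() {
  Bits Features("00000101"), Crypto("00000110");
  Bits On = Features ^ bitsToToggle(Features, Crypto, true);   // "+crypto"
  assert((On & Crypto) == Crypto);
  Bits Off = On ^ bitsToToggle(On, Crypto, false);             // "+nocrypto"
  assert((Off & Crypto).none());
  return 0;
}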
4106 static SMLoc incrementLoc(SMLoc L, int Offset) {
4107  return SMLoc::getFromPointer(L.getPointer() + Offset);
4108 }
4109 
4110 /// parseDirectiveCPU
4111 /// ::= .cpu id
4112 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4113  SMLoc CurLoc = getLoc();
4114 
4115  StringRef CPU, ExtensionString;
4116  std::tie(CPU, ExtensionString) =
4117  getParser().parseStringToEndOfStatement().trim().split('+');
4118 
4119  if (parseToken(AsmToken::EndOfStatement))
4120  return true;
4121 
4122  SmallVector<StringRef, 4> RequestedExtensions;
4123  if (!ExtensionString.empty())
4124  ExtensionString.split(RequestedExtensions, '+');
4125 
4126  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4127  // once that is tablegen'ed
4128  if (!getSTI().isCPUStringValid(CPU)) {
4129  Error(CurLoc, "unknown CPU name");
4130  return false;
4131  }
4132 
4133  MCSubtargetInfo &STI = copySTI();
4134  STI.setDefaultFeatures(CPU, "");
4135  CurLoc = incrementLoc(CurLoc, CPU.size());
4136 
4137  FeatureBitset Features = STI.getFeatureBits();
4138  for (auto Name : RequestedExtensions) {
4139  // Advance source location past '+'.
4140  CurLoc = incrementLoc(CurLoc, 1);
4141 
4142  bool EnableFeature = true;
4143 
4144  if (Name.startswith_lower("no")) {
4145  EnableFeature = false;
4146  Name = Name.substr(2);
4147  }
4148 
4149  bool FoundExtension = false;
4150  for (const auto &Extension : ExtensionMap) {
4151  if (Extension.Name != Name)
4152  continue;
4153 
4154  if (Extension.Features.none())
4155  report_fatal_error("unsupported architectural extension: " + Name);
4156 
4157  FeatureBitset ToggleFeatures = EnableFeature
4158  ? (~Features & Extension.Features)
4159  : ( Features & Extension.Features);
4160  uint64_t Features =
4161  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4162  setAvailableFeatures(Features);
4163  FoundExtension = true;
4164 
4165  break;
4166  }
4167 
4168  if (!FoundExtension)
4169  Error(CurLoc, "unsupported architectural extension");
4170 
4171  CurLoc = incrementLoc(CurLoc, Name.size());
4172  }
4173  return false;
4174 }
4175 
4176 /// parseDirectiveWord
4177 /// ::= .word [ expression (, expression)* ]
4178 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4179  auto parseOp = [&]() -> bool {
4180  const MCExpr *Value;
4181  if (getParser().parseExpression(Value))
4182  return true;
4183  getParser().getStreamer().EmitValue(Value, Size, L);
4184  return false;
4185  };
4186 
4187  if (parseMany(parseOp))
4188  return true;
4189  return false;
4190 }
4191 
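For reference, ParseDirective above routes .hword, .word and .xword here with sizes 2, 4 and 8. A hedged usage sketch (illustrative input, not taken from the LLVM test suite), written as a C string:

// Each directive takes one or more comma-separated expressions and emits
// them as 2-, 4- and 8-byte values respectively.
static const char *DataDirectives =
    ".hword 0x1234\n"
    ".word  0x12345678\n"
    ".xword 0x123456789abcdef0\n";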
4192 /// parseDirectiveInst
4193 /// ::= .inst opcode [, ...]
4194 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4195  if (getLexer().is(AsmToken::EndOfStatement))
4196  return Error(Loc, "expected expression following '.inst' directive");
4197 
4198  auto parseOp = [&]() -> bool {
4199  SMLoc L = getLoc();
4200  const MCExpr *Expr;
4201  if (check(getParser().parseExpression(Expr), L, "expected expression"))
4202  return true;
4203  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4204  if (check(!Value, L, "expected constant expression"))
4205  return true;
4206  getTargetStreamer().emitInst(Value->getValue());
4207  return false;
4208  };
4209 
4210  if (parseMany(parseOp))
4211  return addErrorSuffix(" in '.inst' directive");
4212  return false;
4213 }
4214 
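As a usage sketch for the handler above (illustrative input, not from the LLVM test suite): .inst takes one or more constant expressions and emits each as a raw instruction word; 0xd503201f and 0xd65f03c0 are the A64 encodings of NOP and RET.

static const char *InstDirectives =
    ".inst 0xd503201f\n"               // a single raw encoding (NOP)
    ".inst 0xd503201f, 0xd65f03c0\n";  // several encodings on one line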
4215 // parseDirectiveTLSDescCall:
4216 // ::= .tlsdesccall symbol
4217 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4218  StringRef Name;
4219  if (check(getParser().parseIdentifier(Name), L,
4220  "expected symbol after directive") ||
4221  parseToken(AsmToken::EndOfStatement))
4222  return true;
4223 
4224  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4225  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4226  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4227 
4228  MCInst Inst;
4229  Inst.setOpcode(AArch64::TLSDESCCALL);
4230  Inst.addOperand(MCOperand::createExpr(Expr));
4231 
4232  getParser().getStreamer().EmitInstruction(Inst, getSTI());
4233  return false;
4234 }
4235 
4236 /// ::= .loh <lohName | lohId> label1, ..., labelN
4237 /// The number of arguments depends on the loh identifier.
4238 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4239  MCLOHType Kind;
4240  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4241  if (getParser().getTok().isNot(AsmToken::Integer))
4242  return TokError("expected an identifier or a number in directive");
4243  // We successfully get a numeric value for the identifier.
4244  // Check if it is valid.
4245  int64_t Id = getParser().getTok().getIntVal();
4246  if (Id <= -1U && !isValidMCLOHType(Id))
4247  return TokError("invalid numeric identifier in directive");
4248  Kind = (MCLOHType)Id;
4249  } else {
4250  StringRef Name = getTok().getIdentifier();
4251  // We successfully parse an identifier.
4252  // Check if it is a recognized one.
4253  int Id = MCLOHNameToId(Name);
4254 
4255  if (Id == -1)
4256  return TokError("invalid identifier in directive");
4257  Kind = (MCLOHType)Id;
4258  }
4259  // Consume the identifier.
4260  Lex();
4261  // Get the number of arguments of this LOH.
4262  int NbArgs = MCLOHIdToNbArgs(Kind);
4263 
4264  assert(NbArgs != -1 && "Invalid number of arguments");
4265 
4266  SmallVector<MCSymbol *, 3> Args;
4267  for (int Idx = 0; Idx < NbArgs; ++Idx) {
4268  StringRef Name;
4269  if (getParser().parseIdentifier(Name))
4270  return TokError("expected identifier in directive");
4271  Args.push_back(getContext().getOrCreateSymbol(Name));
4272 
4273  if (Idx + 1 == NbArgs)
4274  break;
4275  if (parseToken(AsmToken::Comma,
4276  "unexpected token in '" + Twine(IDVal) + "' directive"))
4277  return true;
4278  }
4279  if (parseToken(AsmToken::EndOfStatement,
4280  "unexpected token in '" + Twine(IDVal) + "' directive"))
4281  return true;
4282 
4283  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4284  return false;
4285 }
4286 
4287 /// parseDirectiveLtorg
4288 /// ::= .ltorg | .pool
4289 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4290  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4291  return true;
4292  getTargetStreamer().emitCurrentConstantPool();
4293  return false;
4294 }
4295 
4296 /// parseDirectiveReq
4297 /// ::= name .req registername
4298 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4299  MCAsmParser &Parser = getParser();
4300  Parser.Lex(); // Eat the '.req' token.
4301  SMLoc SRegLoc = getLoc();
4302  int RegNum = tryParseRegister();
4303  RegKind RegisterKind = RegKind::Scalar;
4304 
4305  if (RegNum == -1) {
4306  StringRef Kind;
4307  RegisterKind = RegKind::NeonVector;
4308  RegNum = tryMatchVectorRegister(Kind, false);
4309  if (!Kind.empty())
4310  return Error(SRegLoc, "vector register without type specifier expected");
4311  }
4312 
4313  if (RegNum == -1) {
4314  StringRef Kind;
4315  RegisterKind = RegKind::SVEDataVector;
4316  OperandMatchResultTy Res =
4317  tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
4318 
4319  if (Res == MatchOperand_ParseFail)
4320  return true;
4321 
4322  if (Res == MatchOperand_Success && !Kind.empty())
4323  return Error(SRegLoc,
4324  "sve vector register without type specifier expected");
4325  }
4326 
4327  if (RegNum == -1)
4328  return Error(SRegLoc, "register name or alias expected");
4329 
4330  // Shouldn't be anything else.
4331  if (parseToken(AsmToken::EndOfStatement,
4332  "unexpected input in .req directive"))
4333  return true;
4334 
4335  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
4336  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4337  Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4338 
4339  return false;
4340 }
4341 
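A usage sketch for the .req/.unreq handlers (illustrative input): the alias stays live until it is released, and redefining it to a different register only produces a warning.

static const char *ReqExample =
    "fpreg .req x29\n"   // define an alias for x29
    "mov   fpreg, sp\n"  // use it like any register name
    ".unreq fpreg\n";    // release the alias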
4342 /// parseDirectiveUnreq
4343 /// ::= .unreq registername
4344 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4345  MCAsmParser &Parser = getParser();
4346  if (getTok().isNot(AsmToken::Identifier))
4347  return TokError("unexpected input in .unreq directive.");
4348  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4349  Parser.Lex(); // Eat the identifier.
4350  if (parseToken(AsmToken::EndOfStatement))
4351  return addErrorSuffix("in '.unreq' directive");
4352  return false;
4353 }
4354 
4355 bool
4356 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4357  AArch64MCExpr::VariantKind &ELFRefKind,
4358  MCSymbolRefExpr::VariantKind &DarwinRefKind,
4359  int64_t &Addend) {
4360  ELFRefKind = AArch64MCExpr::VK_INVALID;
4361  DarwinRefKind = MCSymbolRefExpr::VK_None;
4362  Addend = 0;
4363 
4364  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4365  ELFRefKind = AE->getKind();
4366  Expr = AE->getSubExpr();
4367  }
4368 
4369  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4370  if (SE) {
4371  // It's a simple symbol reference with no addend.
4372  DarwinRefKind = SE->getKind();
4373  return true;
4374  }
4375 
4376  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4377  if (!BE)
4378  return false;
4379 
4380  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4381  if (!SE)
4382  return false;
4383  DarwinRefKind = SE->getKind();
4384 
4385  if (BE->getOpcode() != MCBinaryExpr::Add &&
4386  BE->getOpcode() != MCBinaryExpr::Sub)
4387  return false;
4388 
4389  // See if the addend is a constant, otherwise there's more going
4390  // on here than we can deal with.
4391  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4392  if (!AddendExpr)
4393  return false;
4394 
4395  Addend = AddendExpr->getValue();
4396  if (BE->getOpcode() == MCBinaryExpr::Sub)
4397  Addend = -Addend;
4398 
4399  // It's some symbol reference + a constant addend, but really
4400  // shouldn't use both Darwin and ELF syntax.
4401  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4402  DarwinRefKind == MCSymbolRefExpr::VK_None;
4403 }
4404 
4405 /// Force static initialization.
4406 extern "C" void LLVMInitializeAArch64AsmParser() {
4407  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
4408  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
4409  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
4410 }
4411 
4412 #define GET_REGISTER_MATCHER
4413 #define GET_SUBTARGET_FEATURE_NAME
4414 #define GET_MATCHER_IMPLEMENTATION
4415 #define GET_MNEMONIC_SPELL_CHECKER
4416 #include "AArch64GenAsmMatcher.inc"
4417 
4418 // Define this matcher function after the auto-generated include so we
4419 // have the match class enum definitions.
4420 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4421  unsigned Kind) {
4422  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4423  // If the kind is a token for a literal immediate, check if our asm
4424  // operand matches. This is for InstAliases which have a fixed-value
4425  // immediate in the syntax.
4426  int64_t ExpectedVal;
4427  switch (Kind) {
4428  default:
4429  return Match_InvalidOperand;
4430  case MCK__35_0:
4431  ExpectedVal = 0;
4432  break;
4433  case MCK__35_1:
4434  ExpectedVal = 1;
4435  break;
4436  case MCK__35_12:
4437  ExpectedVal = 12;
4438  break;
4439  case MCK__35_16:
4440  ExpectedVal = 16;
4441  break;
4442  case MCK__35_2:
4443  ExpectedVal = 2;
4444  break;
4445  case MCK__35_24:
4446  ExpectedVal = 24;
4447  break;
4448  case MCK__35_3:
4449  ExpectedVal = 3;
4450  break;
4451  case MCK__35_32:
4452  ExpectedVal = 32;
4453  break;
4454  case MCK__35_4:
4455  ExpectedVal = 4;
4456  break;
4457  case MCK__35_48:
4458  ExpectedVal = 48;
4459  break;
4460  case MCK__35_6:
4461  ExpectedVal = 6;
4462  break;
4463  case MCK__35_64:
4464  ExpectedVal = 64;
4465  break;
4466  case MCK__35_8:
4467  ExpectedVal = 8;
4468  break;
4469  }
4470  if (!Op.isImm())
4471  return Match_InvalidOperand;
4472  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4473  if (!CE)
4474  return Match_InvalidOperand;
4475  if (CE->getValue() == ExpectedVal)
4476  return Match_Success;
4477  return Match_InvalidOperand;
4478 }
4479 
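The MCK__35_N class names above are the matcher's mangling of literal '#N' tokens (35 is the ASCII code for '#'), so the switch simply recovers N and compares it against the parsed constant. A hypothetical decoder for the same mangling, purely for illustration:

#include <cassert>
#include <cstdint>
#include <string>

static bool decodeHashLiteral(const std::string &Kind, int64_t &Value) {
  const std::string Prefix = "MCK__35_";
  if (Kind.size() <= Prefix.size() ||
      Kind.compare(0, Prefix.size(), Prefix) != 0)
    return false;
  Value = std::stoll(Kind.substr(Prefix.size()));
  return true;
}

int main() {
  int64_t V = 0;
  assert(decodeHashLiteral("MCK__35_12", V) && V == 12);
  assert(!decodeHashLiteral("MCK_GPR64", V));
  return 0;
}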
4480 OperandMatchResultTy
4481 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4482 
4483  SMLoc S = getLoc();
4484 
4485  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4486  Error(S, "expected register");
4487  return MatchOperand_ParseFail;
4488  }
4489 
4490  int FirstReg = tryParseRegister();
4491  if (FirstReg == -1) {
4492  return MatchOperand_ParseFail;
4493  }
4494  const MCRegisterClass &WRegClass =
4495  AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4496  const MCRegisterClass &XRegClass =
4497  AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4498 
4499  bool isXReg = XRegClass.contains(FirstReg),
4500  isWReg = WRegClass.contains(FirstReg);
4501  if (!isXReg && !isWReg) {
4502  Error(S, "expected first even register of a "
4503  "consecutive same-size even/odd register pair");
4504  return MatchOperand_ParseFail;
4505  }
4506 
4507  const MCRegisterInfo *RI = getContext().getRegisterInfo();
4508  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4509 
4510  if (FirstEncoding & 0x1) {
4511  Error(S, "expected first even register of a "
4512  "consecutive same-size even/odd register pair");
4513  return MatchOperand_ParseFail;
4514  }
4515 
4516  SMLoc M = getLoc();
4517  if (getParser().getTok().isNot(AsmToken::Comma)) {
4518  Error(M, "expected comma");
4519  return MatchOperand_ParseFail;
4520  }
4521  // Eat the comma
4522  getParser().Lex();
4523 
4524  SMLoc E = getLoc();
4525  int SecondReg = tryParseRegister();
4526  if (SecondReg ==-1) {
4527  return MatchOperand_ParseFail;
4528  }
4529 
4530  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4531  (isXReg && !XRegClass.contains(SecondReg)) ||
4532  (isWReg && !WRegClass.contains(SecondReg))) {
4533  Error(E,"expected second odd register of a "
4534  "consecutive same-size even/odd register pair");
4535  return MatchOperand_ParseFail;
4536  }
4537 
4538  unsigned Pair = 0;
4539  if (isXReg) {
4540  Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4541  &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4542  } else {
4543  Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4544  &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4545  }
4546 
4547  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
4548  getLoc(), getContext()));
4549 
4550  return MatchOperand_Success;
4551 }
4552 
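The pairing rule enforced above is: the first register's hardware encoding must be even, the second must be exactly one above it, and both must be the same width (both X or both W). A standalone sketch of that check using raw encoding values:

#include <cassert>

static bool isValidSeqPair(unsigned FirstEnc, unsigned SecondEnc,
                           bool FirstIsX, bool SecondIsX) {
  if (FirstEnc & 0x1)               // first register must be even
    return false;
  if (SecondEnc != FirstEnc + 1)    // second must be the next register up
    return false;
  return FirstIsX == SecondIsX;     // no mixing of X and W registers
}

int main() {
  assert(isValidSeqPair(0, 1, true, true));    // x0, x1 -> ok
  assert(!isValidSeqPair(1, 2, true, true));   // x1, x2 -> first is odd
  assert(!isValidSeqPair(2, 3, true, false));  // x2, w3 -> mixed widths
  return 0;
}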
4553 template <bool ParseSuffix>
4554 OperandMatchResultTy
4555 AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
4556  const SMLoc S = getLoc();
4557  // Check for a SVE vector register specifier first.
4558  int RegNum = -1;
4559  StringRef Kind;
4560 
4561  OperandMatchResultTy Res =
4562  tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
4563 
4564  if (Res != MatchOperand_Success)
4565  return Res;
4566 
4567  if (ParseSuffix && Kind.empty())
4568  return MatchOperand_NoMatch;
4569 
4570  unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
4571  .Case("", -1)
4572  .Case(".b", 8)
4573  .Case(".h", 16)
4574  .Case(".s", 32)
4575  .Case(".d", 64)
4576  .Case(".q", 128)
4577  .Default(0);
4578  if (!ElementWidth)
4579  return MatchOperand_NoMatch;
4580 
4581  Operands.push_back(
4582  AArch64Operand::CreateReg(RegNum, RegKind::SVEDataVector, ElementWidth,
4583  S, S, getContext()));
4584 
4585  return MatchOperand_Success;
4586 }
static bool isValidVectorKind(StringRef Name)
static bool isReg(const MCInst &MI, unsigned OpNo)
static bool isSVEDataVectorRegister(StringRef Name)
Represents a range in source code.
Definition: SMLoc.h:49
void push_back(const T &Elt)
Definition: SmallVector.h:212
Target & getTheAArch64beTarget()
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static float getFPImmFloat(unsigned Imm)
LLVM_NODISCARD bool startswith_lower(StringRef Prefix) const
Check if this string starts with the given Prefix, ignoring case.
Definition: StringRef.cpp:62
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmLexer.h:116
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:305
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
VariantKind getKind() const
Definition: MCExpr.h:320
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
Definition: StringRef.h:176
static const AArch64MCExpr * create(const MCExpr *Expr, VariantKind Kind, MCContext &Ctx)
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:110
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Target & getTheAArch64leTarget()
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:137
MCTargetAsmParser - Generic interface to target specific assembly parsers.
static CondCode getInvertedCondCode(CondCode Code)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
Target specific streamer interface.
Definition: MCStreamer.h:80
bool isNot(TokenKind K) const
Definition: MCAsmLexer.h:89
const MCExpr * getLHS() const
Get the left-hand side expression of the binary operator.
Definition: MCExpr.h:546
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
static unsigned getXRegFromWReg(unsigned Reg)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
void changeSign()
Definition: APFloat.h:1050
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:33
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, bool PrintSchedInfo=false)
Emit the given Instruction into the current section.
Definition: MCStreamer.cpp:863
return AArch64::GPR64RegClass contains(Reg)
static SMLoc incrementLoc(SMLoc L, int Offset)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string...
Definition: MCAsmLexer.h:105
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:116
static ManagedStatic< DebugCounter > DC
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
bool contains(unsigned Reg) const
contains - Return true if the specified register is included in this register class.
const FeatureBitset & getFeatureBits() const
getFeatureBits - Return the feature bits.
RegisterKind
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:36
Reg
All possible values of the reg field in the ModR/M byte.
Target independent representation for an assembler token.
Definition: MCAsmLexer.h:27
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:165
bool getExtensionFeatures(unsigned Extensions, std::vector< StringRef > &Features)
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
Definition: StringExtras.h:327
Target & getTheARM64Target()
static bool isMem(const MachineInstr &MI, unsigned Op)
Definition: X86InstrInfo.h:137
zlib-gnu style compression
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand...
This file implements a class to represent arbitrary precision integral constant values and operations...
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:65
AArch64::ArchKind parseArch(StringRef Arch)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
Context object for machine code objects.
Definition: MCContext.h:59
std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \\\)
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool startswith(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:267
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch & Case(const char(&S)[N], const T &Value)
Definition: StringSwitch.h:74
RegisterMCAsmParser - Helper template for registering a target specific assembly parser, for use in the target machine initialization function.
const MCExpr * getRHS() const
Get the right-hand side expression of the binary operator.
Definition: MCExpr.h:549
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:133
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:26
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:916
const MCExpr * getExpr() const
Definition: MCInst.h:96
static const fltSemantics & IEEEdouble() LLVM_READNONE
Definition: APFloat.cpp:122
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:598
MCRegisterClass - Base class of TargetRegisterClass.
FeatureBitset getRequiredFeatures() const
const char * Name
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:159
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
static unsigned getWRegFromXReg(unsigned Reg)
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
FeatureBitset ToggleFeature(uint64_t FB)
ToggleFeature - Toggle a feature and returns the re-computed feature bits.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
const char * getPointer() const
Definition: SMLoc.h:35
int64_t getValue() const
Definition: MCExpr.h:151
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
Streaming machine code generation interface.
Definition: MCStreamer.h:169
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:236
Container class for subtarget features.
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0&#39;s from the least significant bit to the most stopping at the first 1...
Definition: MathExtras.h:112
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg...
LLVM_NODISCARD StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition: StringRef.h:836
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static int MCLOHNameToId(StringRef Name)
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:24
static bool isValidSVEKind(StringRef Name)
virtual MCAsmLexer & getLexer()=0
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MCLOHType
Linker Optimization Hint Type.
bool isExpr() const
Definition: MCInst.h:61
int64_t getIntVal() const
Definition: MCAsmLexer.h:121
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition: MCAsmLexer.h:223
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition: APInt.h:443
Binary assembler expressions.
Definition: MCExpr.h:399
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(const T &Value) const
Definition: StringSwitch.h:244
const char * Name
std::enable_if< std::numeric_limits< T >::is_signed, bool >::type getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:497
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef drop_front(size_t N=1) const
Return a StringRef equal to &#39;this&#39; but with the first N elements dropped.
Definition: StringRef.h:645
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
MCStreamer & getStreamer()
Definition: MCStreamer.h:88
void setOpcode(unsigned Op)
Definition: MCInst.h:171
bool isSubRegisterEq(unsigned RegA, unsigned RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:180
LLVM_NODISCARD std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:727
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:710
static StringRef MCLOHDirectiveName()
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:531
StringMap - This is an unconventional map that is specialized for handling keys that are "strings"...
Definition: StringMap.h:224
bool is(TokenKind K) const
Definition: MCAsmLexer.h:88
Class for arbitrary precision integers.
Definition: APInt.h:69
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
const SysReg * lookupSysRegByName(StringRef)
Base class for user error types.
Definition: Error.h:331
uint32_t parseGenericRegister(StringRef Name)
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static const struct @338 ExtensionMap[]
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:37
bool haveFeatures(FeatureBitset ActiveFeatures) const
static bool isAdvSIMDModImmType10(uint64_t Imm)
uint16_t getEncodingValue(unsigned RegNo) const
Returns the encoding for RegNo.
StringRef getABIName() const
getABIName - If this returns a non-empty string this represents the textual name of the ABI that we w...
static const size_t npos
Definition: StringRef.h:51
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
#define N
static bool isValidMCLOHType(unsigned Kind)
MCSubtargetInfo - Generic base class for all target subtargets.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS, unsigned VariantID=0)
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition: APInt.h:449
Opcode getOpcode() const
Get the kind of this binary expression.
Definition: MCExpr.h:543
const unsigned Kind
LLVM_NODISCARD std::string lower() const
Definition: StringRef.cpp:123
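A one-line usage sketch (assumed example, not from the parser) of StringRef::lower, which returns a lower-cased std::string copy:

    #include "llvm/ADT/StringRef.h"
    #include <cassert>

    int main() {
      assert(llvm::StringRef("LSL").lower() == "lsl"); // returns std::string
      return 0;
    }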
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static int MCLOHIdToNbArgs(MCLOHType Kind)
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:285
LLVM Value Representation.
Definition: Value.h:73
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:235
const FeatureBitset Features
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
This class implements an extremely fast bulk output stream that can only output to a stream...
Definition: raw_ostream.h:44
Subtraction.
Definition: MCExpr.h:423
void addOperand(const MCOperand &Op)
Definition: MCInst.h:184
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
Represents a location in source code.
Definition: SMLoc.h:24
static const char * getSubtargetFeatureName(uint64_t Val)
unsigned getOpcode() const
Definition: MCInst.h:172
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:298
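A brief illustration of find and npos (the token is invented for the example):

    #include "llvm/ADT/StringRef.h"
    #include <cassert>

    int main() {
      llvm::StringRef Tok("v0.4s");
      assert(Tok.find('.') == 2);                     // index of the first '.'
      assert(Tok.find('!') == llvm::StringRef::npos); // npos when absent
      return 0;
    }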
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:123
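A minimal, hedged sketch of assembling an MCInst from immediate operands with MCOperand::createImm, MCInst::setOpcode and MCInst::addOperand; the opcode value 0 is a placeholder, not a real AArch64 opcode:

    #include "llvm/MC/MCInst.h"
    #include <cassert>

    int main() {
      llvm::MCInst Inst;
      Inst.setOpcode(0);                               // placeholder opcode value
      Inst.addOperand(llvm::MCOperand::createImm(42)); // append an immediate operand
      assert(Inst.getNumOperands() == 1);
      assert(Inst.getOperand(0).getImm() == 42);
      return 0;
    }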
bool getArchFeatures(ArchKind AK, std::vector< StringRef > &Features)
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx)
Definition: MCExpr.cpp:159
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
static void parseValidVectorKind(StringRef Name, unsigned &NumElements, char &ElementKind)
void LLVMInitializeAArch64AsmParser()
Force static initialization.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
unsigned getDefaultExtensions(StringRef CPU, ArchKind AK)
void setDefaultFeatures(StringRef CPU, StringRef FS)
Set the features to the default for the given CPU with an appended feature string.