1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "Utils/AArch64BaseInfo.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCLinkerOptimizationHint.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetOptions.h"
40 #include "llvm/MC/MCValue.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/Compiler.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/MathExtras.h"
45 #include "llvm/Support/SMLoc.h"
46 #include "llvm/Support/TargetParser.h"
47 #include "llvm/Support/TargetRegistry.h"
48 #include "llvm/Support/raw_ostream.h"
49 #include <cassert>
50 #include <cctype>
51 #include <cstdint>
52 #include <cstdio>
53 #include <string>
54 #include <tuple>
55 #include <utility>
56 #include <vector>
57 
58 using namespace llvm;
59 
60 namespace {
61 
62 enum class RegKind {
63  Scalar,
64  NeonVector,
65  SVEDataVector,
66  SVEPredicateVector
67 };
68 
69 class AArch64AsmParser : public MCTargetAsmParser {
70 private:
71  StringRef Mnemonic; ///< Instruction mnemonic.
72 
73  // Map of register aliases registered via the .req directive.
74  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
75 
76  AArch64TargetStreamer &getTargetStreamer() {
77  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
78  return static_cast<AArch64TargetStreamer &>(TS);
79  }
80 
81  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
82 
83  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
84  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
85  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
86  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
87  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
88  int tryParseRegister();
89  int tryMatchVectorRegister(StringRef &Kind, bool expected);
90  bool parseRegister(OperandVector &Operands);
91  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
92  bool parseVectorList(OperandVector &Operands);
93  bool parseOperand(OperandVector &Operands, bool isCondCode,
94  bool invertCondCode);
95 
96  bool showMatchError(SMLoc Loc, unsigned ErrCode, OperandVector &Operands);
97 
98  bool parseDirectiveArch(SMLoc L);
99  bool parseDirectiveCPU(SMLoc L);
100  bool parseDirectiveWord(unsigned Size, SMLoc L);
101  bool parseDirectiveInst(SMLoc L);
102 
103  bool parseDirectiveTLSDescCall(SMLoc L);
104 
105  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
106  bool parseDirectiveLtorg(SMLoc L);
107 
108  bool parseDirectiveReq(StringRef Name, SMLoc L);
109  bool parseDirectiveUnreq(SMLoc L);
110 
111  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
112  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
113  OperandVector &Operands, MCStreamer &Out,
114  uint64_t &ErrorInfo,
115  bool MatchingInlineAsm) override;
116 /// @name Auto-generated Match Functions
117 /// {
118 
119 #define GET_ASSEMBLER_HEADER
120 #include "AArch64GenAsmMatcher.inc"
121 
122  /// }
123 
124  OperandMatchResultTy tryParseSVERegister(int &Reg, StringRef &Kind,
125  RegKind MatchKind);
126  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
127  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
128  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
129  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
130  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
131  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
132  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
133  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
134  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
135  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
136  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
137  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
138  bool tryParseNeonVectorRegister(OperandVector &Operands);
139  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
140  template <bool ParseSuffix>
141  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
142  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
143  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
144 
145 public:
146  enum AArch64MatchResultTy {
147  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
148 #define GET_OPERAND_DIAGNOSTIC_TYPES
149 #include "AArch64GenAsmMatcher.inc"
150  };
151  bool IsILP32;
152 
153  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
154  const MCInstrInfo &MII, const MCTargetOptions &Options)
155  : MCTargetAsmParser(Options, STI, MII) {
156  IsILP32 = Options.getABIName() == "ilp32";
157  MCAsmParserExtension::Initialize(Parser);
158  MCStreamer &S = getParser().getStreamer();
159  if (S.getTargetStreamer() == nullptr)
160  new AArch64TargetStreamer(S);
161 
162  // Initialize the set of available features.
163  setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
164  }
165 
166  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
167  SMLoc NameLoc, OperandVector &Operands) override;
168  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
169  bool ParseDirective(AsmToken DirectiveID) override;
170  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
171  unsigned Kind) override;
172 
173  static bool classifySymbolRef(const MCExpr *Expr,
174  AArch64MCExpr::VariantKind &ELFRefKind,
175  MCSymbolRefExpr::VariantKind &DarwinRefKind,
176  int64_t &Addend);
177 };
178 
179 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
180 /// instruction.
181 class AArch64Operand : public MCParsedAsmOperand {
182 private:
183  enum KindTy {
184  k_Immediate,
185  k_ShiftedImm,
186  k_CondCode,
187  k_Register,
188  k_VectorList,
189  k_VectorIndex,
190  k_Token,
191  k_SysReg,
192  k_SysCR,
193  k_Prefetch,
194  k_ShiftExtend,
195  k_FPImm,
196  k_Barrier,
197  k_PSBHint,
198  } Kind;
199 
200  SMLoc StartLoc, EndLoc;
201 
202  struct TokOp {
203  const char *Data;
204  unsigned Length;
205  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
206  };
207 
208  struct RegOp {
209  unsigned RegNum;
210  RegKind Kind;
211 
212  int ElementWidth;
213  };
214 
215  struct VectorListOp {
216  unsigned RegNum;
217  unsigned Count;
218  unsigned NumElements;
219  unsigned ElementKind;
220  };
221 
222  struct VectorIndexOp {
223  unsigned Val;
224  };
225 
226  struct ImmOp {
227  const MCExpr *Val;
228  };
229 
230  struct ShiftedImmOp {
231  const MCExpr *Val;
232  unsigned ShiftAmount;
233  };
234 
235  struct CondCodeOp {
236  AArch64CC::CondCode Code;
237  };
238 
239  struct FPImmOp {
240  unsigned Val; // Encoded 8-bit representation.
241  };
242 
243  struct BarrierOp {
244  const char *Data;
245  unsigned Length;
246  unsigned Val; // Not the enum since not all values have names.
247  };
248 
249  struct SysRegOp {
250  const char *Data;
251  unsigned Length;
252  uint32_t MRSReg;
253  uint32_t MSRReg;
254  uint32_t PStateField;
255  };
256 
257  struct SysCRImmOp {
258  unsigned Val;
259  };
260 
261  struct PrefetchOp {
262  const char *Data;
263  unsigned Length;
264  unsigned Val;
265  };
266 
267  struct PSBHintOp {
268  const char *Data;
269  unsigned Length;
270  unsigned Val;
271  };
272 
273  struct ShiftExtendOp {
274  AArch64_AM::ShiftExtendType Type;
275  unsigned Amount;
276  bool HasExplicitAmount;
277  };
278 
279  struct ExtendOp {
280  unsigned Val;
281  };
282 
283  union {
284  struct TokOp Tok;
285  struct RegOp Reg;
286  struct VectorListOp VectorList;
287  struct VectorIndexOp VectorIndex;
288  struct ImmOp Imm;
289  struct ShiftedImmOp ShiftedImm;
290  struct CondCodeOp CondCode;
291  struct FPImmOp FPImm;
292  struct BarrierOp Barrier;
293  struct SysRegOp SysReg;
294  struct SysCRImmOp SysCRImm;
295  struct PrefetchOp Prefetch;
296  struct PSBHintOp PSBHint;
297  struct ShiftExtendOp ShiftExtend;
298  };
299 
300  // Keep the MCContext around as the MCExprs may need to be manipulated
301  // during the add<>Operands() calls.
302  MCContext &Ctx;
303 
304 public:
305  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
306 
307  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
308  Kind = o.Kind;
309  StartLoc = o.StartLoc;
310  EndLoc = o.EndLoc;
311  switch (Kind) {
312  case k_Token:
313  Tok = o.Tok;
314  break;
315  case k_Immediate:
316  Imm = o.Imm;
317  break;
318  case k_ShiftedImm:
319  ShiftedImm = o.ShiftedImm;
320  break;
321  case k_CondCode:
322  CondCode = o.CondCode;
323  break;
324  case k_FPImm:
325  FPImm = o.FPImm;
326  break;
327  case k_Barrier:
328  Barrier = o.Barrier;
329  break;
330  case k_Register:
331  Reg = o.Reg;
332  break;
333  case k_VectorList:
334  VectorList = o.VectorList;
335  break;
336  case k_VectorIndex:
337  VectorIndex = o.VectorIndex;
338  break;
339  case k_SysReg:
340  SysReg = o.SysReg;
341  break;
342  case k_SysCR:
343  SysCRImm = o.SysCRImm;
344  break;
345  case k_Prefetch:
346  Prefetch = o.Prefetch;
347  break;
348  case k_PSBHint:
349  PSBHint = o.PSBHint;
350  break;
351  case k_ShiftExtend:
352  ShiftExtend = o.ShiftExtend;
353  break;
354  }
355  }
356 
357  /// getStartLoc - Get the location of the first token of this operand.
358  SMLoc getStartLoc() const override { return StartLoc; }
359  /// getEndLoc - Get the location of the last token of this operand.
360  SMLoc getEndLoc() const override { return EndLoc; }
361 
362  StringRef getToken() const {
363  assert(Kind == k_Token && "Invalid access!");
364  return StringRef(Tok.Data, Tok.Length);
365  }
366 
367  bool isTokenSuffix() const {
368  assert(Kind == k_Token && "Invalid access!");
369  return Tok.IsSuffix;
370  }
371 
372  const MCExpr *getImm() const {
373  assert(Kind == k_Immediate && "Invalid access!");
374  return Imm.Val;
375  }
376 
377  const MCExpr *getShiftedImmVal() const {
378  assert(Kind == k_ShiftedImm && "Invalid access!");
379  return ShiftedImm.Val;
380  }
381 
382  unsigned getShiftedImmShift() const {
383  assert(Kind == k_ShiftedImm && "Invalid access!");
384  return ShiftedImm.ShiftAmount;
385  }
386 
387  AArch64CC::CondCode getCondCode() const {
388  assert(Kind == k_CondCode && "Invalid access!");
389  return CondCode.Code;
390  }
391 
392  unsigned getFPImm() const {
393  assert(Kind == k_FPImm && "Invalid access!");
394  return FPImm.Val;
395  }
396 
397  unsigned getBarrier() const {
398  assert(Kind == k_Barrier && "Invalid access!");
399  return Barrier.Val;
400  }
401 
402  StringRef getBarrierName() const {
403  assert(Kind == k_Barrier && "Invalid access!");
404  return StringRef(Barrier.Data, Barrier.Length);
405  }
406 
407  unsigned getReg() const override {
408  assert(Kind == k_Register && "Invalid access!");
409  return Reg.RegNum;
410  }
411 
412  unsigned getVectorListStart() const {
413  assert(Kind == k_VectorList && "Invalid access!");
414  return VectorList.RegNum;
415  }
416 
417  unsigned getVectorListCount() const {
418  assert(Kind == k_VectorList && "Invalid access!");
419  return VectorList.Count;
420  }
421 
422  unsigned getVectorIndex() const {
423  assert(Kind == k_VectorIndex && "Invalid access!");
424  return VectorIndex.Val;
425  }
426 
427  StringRef getSysReg() const {
428  assert(Kind == k_SysReg && "Invalid access!");
429  return StringRef(SysReg.Data, SysReg.Length);
430  }
431 
432  unsigned getSysCR() const {
433  assert(Kind == k_SysCR && "Invalid access!");
434  return SysCRImm.Val;
435  }
436 
437  unsigned getPrefetch() const {
438  assert(Kind == k_Prefetch && "Invalid access!");
439  return Prefetch.Val;
440  }
441 
442  unsigned getPSBHint() const {
443  assert(Kind == k_PSBHint && "Invalid access!");
444  return PSBHint.Val;
445  }
446 
447  StringRef getPSBHintName() const {
448  assert(Kind == k_PSBHint && "Invalid access!");
449  return StringRef(PSBHint.Data, PSBHint.Length);
450  }
451 
452  StringRef getPrefetchName() const {
453  assert(Kind == k_Prefetch && "Invalid access!");
454  return StringRef(Prefetch.Data, Prefetch.Length);
455  }
456 
457  AArch64_AM::ShiftExtendType getShiftExtendType() const {
458  assert(Kind == k_ShiftExtend && "Invalid access!");
459  return ShiftExtend.Type;
460  }
461 
462  unsigned getShiftExtendAmount() const {
463  assert(Kind == k_ShiftExtend && "Invalid access!");
464  return ShiftExtend.Amount;
465  }
466 
467  bool hasShiftExtendAmount() const {
468  assert(Kind == k_ShiftExtend && "Invalid access!");
469  return ShiftExtend.HasExplicitAmount;
470  }
471 
472  bool isImm() const override { return Kind == k_Immediate; }
473  bool isMem() const override { return false; }
474 
475  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
476 
477  template <int Bits, int Scale> bool isSImmScaled() const {
478  if (!isImm())
479  return false;
480  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
481  if (!MCE)
482  return false;
483 
484  int64_t Shift = Bits - 1;
485  int64_t MinVal = (int64_t(1) << Shift) * -Scale;
486  int64_t MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
487 
488  int64_t Val = MCE->getValue();
489  return Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0;
490  }
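 // For example, isSImmScaled<7, 4>() accepts multiples of 4 in [-256, 252]:
 // MinVal = -(1 << 6) * 4 = -256 and MaxVal = ((1 << 6) - 1) * 4 = 252.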
491 
492  bool isSVEPattern() const {
493  if (!isImm())
494  return false;
495  auto *MCE = dyn_cast<MCConstantExpr>(getImm());
496  if (!MCE)
497  return false;
498  int64_t Val = MCE->getValue();
499  return Val >= 0 && Val < 32;
500  }
501 
502  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
503  AArch64MCExpr::VariantKind ELFRefKind;
504  MCSymbolRefExpr::VariantKind DarwinRefKind;
505  int64_t Addend;
506  if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
507  Addend)) {
508  // If we don't understand the expression, assume the best and
509  // let the fixup and relocation code deal with it.
510  return true;
511  }
512 
513  if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
514  ELFRefKind == AArch64MCExpr::VK_LO12 ||
515  ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
516  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
517  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
518  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
519  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
520  ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
521  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
522  // Note that we don't range-check the addend. It's adjusted modulo page
523  // size when converted, so there is no "out of range" condition when using
524  // @pageoff.
525  return Addend >= 0 && (Addend % Scale) == 0;
526  } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
527  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
528  // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
529  return Addend == 0;
530  }
531 
532  return false;
533  }
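 // For example, in "ldr x0, [x1, :lo12:sym]" the :lo12: modifier resolves to
 // the low 12 bits of sym's address; for a scaled load/store the addend must
 // be non-negative and a multiple of Scale.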
534 
535  template <int Scale> bool isUImm12Offset() const {
536  if (!isImm())
537  return false;
538 
539  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
540  if (!MCE)
541  return isSymbolicUImm12Offset(getImm(), Scale);
542 
543  int64_t Val = MCE->getValue();
544  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
545  }
546 
547  template <int N, int M>
548  bool isImmInRange() const {
549  if (!isImm())
550  return false;
551  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
552  if (!MCE)
553  return false;
554  int64_t Val = MCE->getValue();
555  return (Val >= N && Val <= M);
556  }
557 
558  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
559  // a logical immediate can always be represented when inverted.
560  template <typename T>
561  bool isLogicalImm() const {
562  if (!isImm())
563  return false;
564  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
565  if (!MCE)
566  return false;
567 
568  int64_t Val = MCE->getValue();
569  int64_t SVal = typename std::make_signed<T>::type(Val);
570  int64_t UVal = typename std::make_unsigned<T>::type(Val);
571  if (Val != SVal && Val != UVal)
572  return false;
573 
574  return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
575  }
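 // For example, 0x00ff00ff00ff00ff (a repeating 16-bit pattern of eight
 // contiguous set bits) is a valid 64-bit logical immediate, so
 // "and x0, x1, #0x00ff00ff00ff00ff" encodes directly, whereas an arbitrary
 // constant such as 0x1234567890abcdef cannot be encoded this way.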
576 
577  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
578 
579  bool isAddSubImm() const {
580  if (!isShiftedImm() && !isImm())
581  return false;
582 
583  const MCExpr *Expr;
584 
585  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
586  if (isShiftedImm()) {
587  unsigned Shift = ShiftedImm.ShiftAmount;
588  Expr = ShiftedImm.Val;
589  if (Shift != 0 && Shift != 12)
590  return false;
591  } else {
592  Expr = getImm();
593  }
594 
595  AArch64MCExpr::VariantKind ELFRefKind;
596  MCSymbolRefExpr::VariantKind DarwinRefKind;
597  int64_t Addend;
598  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
599  DarwinRefKind, Addend)) {
600  return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
601  || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
602  || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
603  || ELFRefKind == AArch64MCExpr::VK_LO12
604  || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
605  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
606  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
607  || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
608  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
609  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
610  || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
611  }
612 
613  // If it's a constant, it should be a real immediate in range:
614  if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
615  return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
616 
617  // If it's an expression, we hope for the best and let the fixup/relocation
618  // code deal with it.
619  return true;
620  }
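 // For example, "add x0, x1, #0xfff" and "add x0, x1, #0xfff, lsl #12" are
 // both valid ADD/SUB immediates, but "#0x1001" is not: it needs more than
 // 12 bits and is not expressible with an 'lsl #12' shift.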
621 
622  bool isAddSubImmNeg() const {
623  if (!isShiftedImm() && !isImm())
624  return false;
625 
626  const MCExpr *Expr;
627 
628  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
629  if (isShiftedImm()) {
630  unsigned Shift = ShiftedImm.ShiftAmount;
631  Expr = ShiftedImm.Val;
632  if (Shift != 0 && Shift != 12)
633  return false;
634  } else
635  Expr = getImm();
636 
637  // Otherwise it should be a real negative immediate in range:
638  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
639  return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
640  }
641 
642  bool isCondCode() const { return Kind == k_CondCode; }
643 
644  bool isSIMDImmType10() const {
645  if (!isImm())
646  return false;
647  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
648  if (!MCE)
649  return false;
650  return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
651  }
652 
653  template<int N>
654  bool isBranchTarget() const {
655  if (!isImm())
656  return false;
657  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
658  if (!MCE)
659  return true;
660  int64_t Val = MCE->getValue();
661  if (Val & 0x3)
662  return false;
663  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
664  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
665  }
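 // For example, isBranchTarget<26>() (the B/BL form) accepts word-aligned
 // offsets in [-(1 << 27), (1 << 27) - 4], i.e. roughly +/-128 MiB from the
 // branch instruction.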
666 
667  bool
668  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
669  if (!isImm())
670  return false;
671 
672  AArch64MCExpr::VariantKind ELFRefKind;
673  MCSymbolRefExpr::VariantKind DarwinRefKind;
674  int64_t Addend;
675  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
676  DarwinRefKind, Addend)) {
677  return false;
678  }
679  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
680  return false;
681 
682  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
683  if (ELFRefKind == AllowedModifiers[i])
684  return Addend == 0;
685  }
686 
687  return false;
688  }
689 
690  bool isMovZSymbolG3() const {
691  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
692  }
693 
694  bool isMovZSymbolG2() const {
695  return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
696  AArch64MCExpr::VK_TPREL_G2,
697  AArch64MCExpr::VK_DTPREL_G2});
698  }
699 
700  bool isMovZSymbolG1() const {
701  return isMovWSymbol({
702  AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
703  AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
704  AArch64MCExpr::VK_DTPREL_G1,
705  });
706  }
707 
708  bool isMovZSymbolG0() const {
709  return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
710  AArch64MCExpr::VK_TPREL_G0,
711  AArch64MCExpr::VK_DTPREL_G0});
712  }
713 
714  bool isMovKSymbolG3() const {
715  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
716  }
717 
718  bool isMovKSymbolG2() const {
719  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
720  }
721 
722  bool isMovKSymbolG1() const {
723  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
724  AArch64MCExpr::VK_TPREL_G1_NC,
725  AArch64MCExpr::VK_DTPREL_G1_NC});
726  }
727 
728  bool isMovKSymbolG0() const {
729  return isMovWSymbol(
730  {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
731  AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
732  }
733 
734  template<int RegWidth, int Shift>
735  bool isMOVZMovAlias() const {
736  if (!isImm()) return false;
737 
738  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
739  if (!CE) return false;
740  uint64_t Value = CE->getValue();
741 
742  return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
743  }
744 
745  template<int RegWidth, int Shift>
746  bool isMOVNMovAlias() const {
747  if (!isImm()) return false;
748 
749  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
750  if (!CE) return false;
751  uint64_t Value = CE->getValue();
752 
753  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
754  }
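 // For example, "mov x0, #0x12340000" satisfies isMOVZMovAlias<64, 16>() and
 // can be emitted as "movz x0, #0x1234, lsl #16", while a value such as
 // 0xffffffffffff5678 is instead representable through the MOVN alias.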
755 
756  bool isFPImm() const { return Kind == k_FPImm; }
757  bool isBarrier() const { return Kind == k_Barrier; }
758  bool isSysReg() const { return Kind == k_SysReg; }
759 
760  bool isMRSSystemRegister() const {
761  if (!isSysReg()) return false;
762 
763  return SysReg.MRSReg != -1U;
764  }
765 
766  bool isMSRSystemRegister() const {
767  if (!isSysReg()) return false;
768  return SysReg.MSRReg != -1U;
769  }
770 
771  bool isSystemPStateFieldWithImm0_1() const {
772  if (!isSysReg()) return false;
773  return (SysReg.PStateField == AArch64PState::PAN ||
774  SysReg.PStateField == AArch64PState::UAO);
775  }
776 
777  bool isSystemPStateFieldWithImm0_15() const {
778  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
779  return SysReg.PStateField != -1U;
780  }
781 
782  bool isReg() const override {
783  return Kind == k_Register;
784  }
785 
786  bool isScalarReg() const {
787  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
788  }
789 
790  bool isNeonVectorReg() const {
791  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
792  }
793 
794  bool isNeonVectorRegLo() const {
795  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
796  AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
797  Reg.RegNum);
798  }
799 
800  template <unsigned Class> bool isSVEVectorReg() const {
801  RegKind RK;
802  switch (Class) {
803  case AArch64::ZPRRegClassID:
804  RK = RegKind::SVEDataVector;
805  break;
806  case AArch64::PPRRegClassID:
807  case AArch64::PPR_3bRegClassID:
808  RK = RegKind::SVEPredicateVector;
809  break;
810  default:
811  llvm_unreachable("Unsupported register class");
812  }
813 
814  return (Kind == k_Register && Reg.Kind == RK) &&
815  AArch64MCRegisterClasses[Class].contains(getReg());
816  }
817 
818  template <int ElementWidth, unsigned Class>
819  bool isSVEVectorRegOfWidth() const {
820  return isSVEVectorReg<Class>() &&
821  (ElementWidth == -1 || Reg.ElementWidth == ElementWidth);
822  }
823 
824  bool isGPR32as64() const {
825  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
826  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
827  }
828 
829  bool isWSeqPair() const {
830  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
831  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
832  Reg.RegNum);
833  }
834 
835  bool isXSeqPair() const {
836  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
837  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
838  Reg.RegNum);
839  }
840 
841  bool isGPR64sp0() const {
842  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
843  AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
844  }
845 
846  template<int64_t Angle, int64_t Remainder>
847  bool isComplexRotation() const {
848  if (!isImm()) return false;
849 
850  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
851  if (!CE) return false;
852  uint64_t Value = CE->getValue();
853 
854  return (Value % Angle == Remainder && Value <= 270);
855  }
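 // For example, isComplexRotation<90, 0>() accepts the FCMLA rotations
 // #0, #90, #180 and #270, and isComplexRotation<180, 90>() accepts the
 // FCADD rotations #90 and #270.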
856 
857  /// Is this a vector list with the type implicit (presumably attached to the
858  /// instruction itself)?
859  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
860  return Kind == k_VectorList && VectorList.Count == NumRegs &&
861  !VectorList.ElementKind;
862  }
863 
864  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
865  bool isTypedVectorList() const {
866  if (Kind != k_VectorList)
867  return false;
868  if (VectorList.Count != NumRegs)
869  return false;
870  if (VectorList.ElementKind != ElementKind)
871  return false;
872  return VectorList.NumElements == NumElements;
873  }
874 
875  bool isVectorIndex1() const {
876  return Kind == k_VectorIndex && VectorIndex.Val == 1;
877  }
878 
879  bool isVectorIndexB() const {
880  return Kind == k_VectorIndex && VectorIndex.Val < 16;
881  }
882 
883  bool isVectorIndexH() const {
884  return Kind == k_VectorIndex && VectorIndex.Val < 8;
885  }
886 
887  bool isVectorIndexS() const {
888  return Kind == k_VectorIndex && VectorIndex.Val < 4;
889  }
890 
891  bool isVectorIndexD() const {
892  return Kind == k_VectorIndex && VectorIndex.Val < 2;
893  }
894 
895  bool isToken() const override { return Kind == k_Token; }
896 
897  bool isTokenEqual(StringRef Str) const {
898  return Kind == k_Token && getToken() == Str;
899  }
900  bool isSysCR() const { return Kind == k_SysCR; }
901  bool isPrefetch() const { return Kind == k_Prefetch; }
902  bool isPSBHint() const { return Kind == k_PSBHint; }
903  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
904  bool isShifter() const {
905  if (!isShiftExtend())
906  return false;
907 
908  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
909  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
910  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
911  ST == AArch64_AM::MSL);
912  }
913  bool isExtend() const {
914  if (!isShiftExtend())
915  return false;
916 
917  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
918  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
919  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
920  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
921  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
922  ET == AArch64_AM::LSL) &&
923  getShiftExtendAmount() <= 4;
924  }
925 
926  bool isExtend64() const {
927  if (!isExtend())
928  return false;
929  // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
930  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
931  return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
932  }
933 
934  bool isExtendLSL64() const {
935  if (!isExtend())
936  return false;
937  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
938  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
939  ET == AArch64_AM::LSL) &&
940  getShiftExtendAmount() <= 4;
941  }
942 
943  template<int Width> bool isMemXExtend() const {
944  if (!isExtend())
945  return false;
946  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
947  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
948  (getShiftExtendAmount() == Log2_32(Width / 8) ||
949  getShiftExtendAmount() == 0);
950  }
951 
952  template<int Width> bool isMemWExtend() const {
953  if (!isExtend())
954  return false;
955  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
956  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
957  (getShiftExtendAmount() == Log2_32(Width / 8) ||
958  getShiftExtendAmount() == 0);
959  }
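 // For example, with Width == 32, "ldr w0, [x1, w2, uxtw #2]" (shift equal to
 // log2 of the 4-byte access size) and "ldr w0, [x1, w2, sxtw]" (no shift)
 // are both accepted, but "uxtw #1" is not.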
960 
961  template <unsigned width>
962  bool isArithmeticShifter() const {
963  if (!isShifter())
964  return false;
965 
966  // An arithmetic shifter is LSL, LSR, or ASR.
967  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
968  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
969  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
970  }
971 
972  template <unsigned width>
973  bool isLogicalShifter() const {
974  if (!isShifter())
975  return false;
976 
977  // A logical shifter is LSL, LSR, ASR or ROR.
978  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
979  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
980  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
981  getShiftExtendAmount() < width;
982  }
983 
984  bool isMovImm32Shifter() const {
985  if (!isShifter())
986  return false;
987 
988  // A 32-bit MOVi shifter is LSL of 0 or 16.
989  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
990  if (ST != AArch64_AM::LSL)
991  return false;
992  uint64_t Val = getShiftExtendAmount();
993  return (Val == 0 || Val == 16);
994  }
995 
996  bool isMovImm64Shifter() const {
997  if (!isShifter())
998  return false;
999 
1000  // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1001  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1002  if (ST != AArch64_AM::LSL)
1003  return false;
1004  uint64_t Val = getShiftExtendAmount();
1005  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1006  }
1007 
1008  bool isLogicalVecShifter() const {
1009  if (!isShifter())
1010  return false;
1011 
1012  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1013  unsigned Shift = getShiftExtendAmount();
1014  return getShiftExtendType() == AArch64_AM::LSL &&
1015  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1016  }
1017 
1018  bool isLogicalVecHalfWordShifter() const {
1019  if (!isLogicalVecShifter())
1020  return false;
1021 
1022  // A logical vector half-word shifter is a left shift by 0 or 8.
1023  unsigned Shift = getShiftExtendAmount();
1024  return getShiftExtendType() == AArch64_AM::LSL &&
1025  (Shift == 0 || Shift == 8);
1026  }
1027 
1028  bool isMoveVecShifter() const {
1029  if (!isShiftExtend())
1030  return false;
1031 
1032  // A move vector shifter is an MSL shift of 8 or 16.
1033  unsigned Shift = getShiftExtendAmount();
1034  return getShiftExtendType() == AArch64_AM::MSL &&
1035  (Shift == 8 || Shift == 16);
1036  }
1037 
1038  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1039  // to LDUR/STUR when the offset is not legal for the former but is for
1040  // the latter. As such, in addition to checking for being a legal unscaled
1041  // address, also check that it is not a legal scaled address. This avoids
1042  // ambiguity in the matcher.
1043  template<int Width>
1044  bool isSImm9OffsetFB() const {
1045  return isSImm<9>() && !isUImm12Offset<Width / 8>();
1046  }
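 // For example, with Width == 64, "ldr x0, [x1, #12]" only matches the
 // unscaled (LDUR) form because 12 is not a multiple of 8, while
 // "ldr x0, [x1, #16]" keeps the scaled LDR encoding.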
1047 
1048  bool isAdrpLabel() const {
1049  // Validation was handled during parsing, so we just sanity check that
1050  // something didn't go haywire.
1051  if (!isImm())
1052  return false;
1053 
1054  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1055  int64_t Val = CE->getValue();
1056  int64_t Min = - (4096 * (1LL << (21 - 1)));
1057  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1058  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1059  }
1060 
1061  return true;
1062  }
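 // For example, ADRP reaches any 4 KiB page within roughly +/-4 GiB of the
 // instruction, so a constant operand must be page-aligned and lie in
 // [-4096 * (1 << 20), 4096 * ((1 << 20) - 1)].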
1063 
1064  bool isAdrLabel() const {
1065  // Validation was handled during parsing, so we just sanity check that
1066  // something didn't go haywire.
1067  if (!isImm())
1068  return false;
1069 
1070  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1071  int64_t Val = CE->getValue();
1072  int64_t Min = - (1LL << (21 - 1));
1073  int64_t Max = ((1LL << (21 - 1)) - 1);
1074  return Val >= Min && Val <= Max;
1075  }
1076 
1077  return true;
1078  }
1079 
1080  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1081  // Add as immediates when possible. Null MCExpr = 0.
1082  if (!Expr)
1083  Inst.addOperand(MCOperand::createImm(0));
1084  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1085  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1086  else
1087  Inst.addOperand(MCOperand::createExpr(Expr));
1088  }
1089 
1090  void addRegOperands(MCInst &Inst, unsigned N) const {
1091  assert(N == 1 && "Invalid number of operands!");
1092  Inst.addOperand(MCOperand::createReg(getReg()));
1093  }
1094 
1095  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1096  assert(N == 1 && "Invalid number of operands!");
1097  assert(
1098  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1099 
1100  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1101  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1102  RI->getEncodingValue(getReg()));
1103 
1104  Inst.addOperand(MCOperand::createReg(Reg));
1105  }
1106 
1107  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1108  assert(N == 1 && "Invalid number of operands!");
1109  assert(
1110  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1111  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1112  }
1113 
1114  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1115  assert(N == 1 && "Invalid number of operands!");
1116  assert(
1117  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1118  Inst.addOperand(MCOperand::createReg(getReg()));
1119  }
1120 
1121  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1122  assert(N == 1 && "Invalid number of operands!");
1123  Inst.addOperand(MCOperand::createReg(getReg()));
1124  }
1125 
1126  template <unsigned NumRegs>
1127  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1128  assert(N == 1 && "Invalid number of operands!");
1129  static const unsigned FirstRegs[] = { AArch64::D0,
1130  AArch64::D0_D1,
1131  AArch64::D0_D1_D2,
1132  AArch64::D0_D1_D2_D3 };
1133  unsigned FirstReg = FirstRegs[NumRegs - 1];
1134 
1135  Inst.addOperand(
1136  MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1137  }
1138 
1139  template <unsigned NumRegs>
1140  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1141  assert(N == 1 && "Invalid number of operands!");
1142  static const unsigned FirstRegs[] = { AArch64::Q0,
1143  AArch64::Q0_Q1,
1144  AArch64::Q0_Q1_Q2,
1145  AArch64::Q0_Q1_Q2_Q3 };
1146  unsigned FirstReg = FirstRegs[NumRegs - 1];
1147 
1148  Inst.addOperand(
1149  MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1150  }
1151 
1152  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1153  assert(N == 1 && "Invalid number of operands!");
1154  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1155  }
1156 
1157  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1158  assert(N == 1 && "Invalid number of operands!");
1159  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1160  }
1161 
1162  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1163  assert(N == 1 && "Invalid number of operands!");
1164  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1165  }
1166 
1167  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1168  assert(N == 1 && "Invalid number of operands!");
1169  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1170  }
1171 
1172  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1173  assert(N == 1 && "Invalid number of operands!");
1174  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1175  }
1176 
1177  void addImmOperands(MCInst &Inst, unsigned N) const {
1178  assert(N == 1 && "Invalid number of operands!");
1179  // If this is a pageoff symrefexpr with an addend, adjust the addend
1180  // to be only the page-offset portion. Otherwise, just add the expr
1181  // as-is.
1182  addExpr(Inst, getImm());
1183  }
1184 
1185  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1186  assert(N == 2 && "Invalid number of operands!");
1187  if (isShiftedImm()) {
1188  addExpr(Inst, getShiftedImmVal());
1189  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1190  } else {
1191  addExpr(Inst, getImm());
1192  Inst.addOperand(MCOperand::createImm(0));
1193  }
1194  }
1195 
1196  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1197  assert(N == 2 && "Invalid number of operands!");
1198 
1199  const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1200  const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1201  int64_t Val = -CE->getValue();
1202  unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1203 
1204  Inst.addOperand(MCOperand::createImm(Val));
1205  Inst.addOperand(MCOperand::createImm(ShiftAmt));
1206  }
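 // For example, "add w0, w1, #-5" is matched through this negated-immediate
 // form and encoded as "sub w0, w1, #5".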
1207 
1208  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1209  assert(N == 1 && "Invalid number of operands!");
1210  Inst.addOperand(MCOperand::createImm(getCondCode()));
1211  }
1212 
1213  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1214  assert(N == 1 && "Invalid number of operands!");
1215  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1216  if (!MCE)
1217  addExpr(Inst, getImm());
1218  else
1219  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1220  }
1221 
1222  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1223  addImmOperands(Inst, N);
1224  }
1225 
1226  template<int Scale>
1227  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1228  assert(N == 1 && "Invalid number of operands!");
1229  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1230 
1231  if (!MCE) {
1232  Inst.addOperand(MCOperand::createExpr(getImm()));
1233  return;
1234  }
1235  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1236  }
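 // For example, with Scale == 8 the offset in "ldr x0, [x1, #32]" is emitted
 // as the immediate field value 32 / 8 = 4.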
1237 
1238  void addSImm9Operands(MCInst &Inst, unsigned N) const {
1239  assert(N == 1 && "Invalid number of operands!");
1240  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1241  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1242  }
1243 
1244  void addSImm10s8Operands(MCInst &Inst, unsigned N) const {
1245  assert(N == 1 && "Invalid number of operands!");
1246  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1247  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1248  }
1249 
1250  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1251  assert(N == 1 && "Invalid number of operands!");
1252  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1253  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1254  }
1255 
1256  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1257  assert(N == 1 && "Invalid number of operands!");
1258  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1259  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1260  }
1261 
1262  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1263  assert(N == 1 && "Invalid number of operands!");
1264  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1265  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1266  }
1267 
1268  void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1269  assert(N == 1 && "Invalid number of operands!");
1270  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1271  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1272  }
1273 
1274  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1275  assert(N == 1 && "Invalid number of operands!");
1276  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1277  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1278  }
1279 
1280  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1281  assert(N == 1 && "Invalid number of operands!");
1282  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1283  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1284  }
1285 
1286  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1287  assert(N == 1 && "Invalid number of operands!");
1288  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1289  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1290  }
1291 
1292  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1293  assert(N == 1 && "Invalid number of operands!");
1294  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1295  assert(MCE && "Invalid constant immediate operand!");
1296  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1297  }
1298 
1299  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1300  assert(N == 1 && "Invalid number of operands!");
1301  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1302  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1303  }
1304 
1305  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1306  assert(N == 1 && "Invalid number of operands!");
1307  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1308  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1309  }
1310 
1311  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1312  assert(N == 1 && "Invalid number of operands!");
1313  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1314  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1315  }
1316 
1317  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1318  assert(N == 1 && "Invalid number of operands!");
1319  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1320  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1321  }
1322 
1323  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1324  assert(N == 1 && "Invalid number of operands!");
1325  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1326  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1327  }
1328 
1329  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1330  assert(N == 1 && "Invalid number of operands!");
1331  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1332  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1333  }
1334 
1335  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1336  assert(N == 1 && "Invalid number of operands!");
1337  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1338  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1339  }
1340 
1341  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1342  assert(N == 1 && "Invalid number of operands!");
1343  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1344  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1345  }
1346 
1347  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1348  assert(N == 1 && "Invalid number of operands!");
1349  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1350  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1351  }
1352 
1353  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1354  assert(N == 1 && "Invalid number of operands!");
1355  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1356  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1357  }
1358 
1359  template <typename T>
1360  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1361  assert(N == 1 && "Invalid number of operands!");
1362  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1363  typename std::make_unsigned<T>::type Val = MCE->getValue();
1364  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1365  Inst.addOperand(MCOperand::createImm(encoding));
1366  }
1367 
1368  template <typename T>
1369  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1370  assert(N == 1 && "Invalid number of operands!");
1371  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1372  typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1373  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1374  Inst.addOperand(MCOperand::createImm(encoding));
1375  }
1376 
1377  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1378  assert(N == 1 && "Invalid number of operands!");
1379  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1380  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1381  Inst.addOperand(MCOperand::createImm(encoding));
1382  }
1383 
1384  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1385  // Branch operands don't encode the low bits, so shift them off
1386  // here. If it's a label, however, just put it on directly as there's
1387  // not enough information now to do anything.
1388  assert(N == 1 && "Invalid number of operands!");
1389  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1390  if (!MCE) {
1391  addExpr(Inst, getImm());
1392  return;
1393  }
1394  assert(MCE && "Invalid constant immediate operand!");
1395  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1396  }
1397 
1398  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1399  // Branch operands don't encode the low bits, so shift them off
1400  // here. If it's a label, however, just put it on directly as there's
1401  // not enough information now to do anything.
1402  assert(N == 1 && "Invalid number of operands!");
1403  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1404  if (!MCE) {
1405  addExpr(Inst, getImm());
1406  return;
1407  }
1408  assert(MCE && "Invalid constant immediate operand!");
1409  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1410  }
1411 
1412  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1413  // Branch operands don't encode the low bits, so shift them off
1414  // here. If it's a label, however, just put it on directly as there's
1415  // not enough information now to do anything.
1416  assert(N == 1 && "Invalid number of operands!");
1417  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1418  if (!MCE) {
1419  addExpr(Inst, getImm());
1420  return;
1421  }
1422  assert(MCE && "Invalid constant immediate operand!");
1423  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1424  }
1425 
1426  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1427  assert(N == 1 && "Invalid number of operands!");
1428  Inst.addOperand(MCOperand::createImm(getFPImm()));
1429  }
1430 
1431  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1432  assert(N == 1 && "Invalid number of operands!");
1433  Inst.addOperand(MCOperand::createImm(getBarrier()));
1434  }
1435 
1436  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1437  assert(N == 1 && "Invalid number of operands!");
1438 
1439  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1440  }
1441 
1442  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1443  assert(N == 1 && "Invalid number of operands!");
1444 
1445  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1446  }
1447 
1448  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1449  assert(N == 1 && "Invalid number of operands!");
1450 
1451  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1452  }
1453 
1454  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1455  assert(N == 1 && "Invalid number of operands!");
1456 
1457  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1458  }
1459 
1460  void addSysCROperands(MCInst &Inst, unsigned N) const {
1461  assert(N == 1 && "Invalid number of operands!");
1462  Inst.addOperand(MCOperand::createImm(getSysCR()));
1463  }
1464 
1465  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1466  assert(N == 1 && "Invalid number of operands!");
1467  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1468  }
1469 
1470  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1471  assert(N == 1 && "Invalid number of operands!");
1472  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1473  }
1474 
1475  void addShifterOperands(MCInst &Inst, unsigned N) const {
1476  assert(N == 1 && "Invalid number of operands!");
1477  unsigned Imm =
1478  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1479  Inst.addOperand(MCOperand::createImm(Imm));
1480  }
1481 
1482  void addExtendOperands(MCInst &Inst, unsigned N) const {
1483  assert(N == 1 && "Invalid number of operands!");
1484  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1485  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1486  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1487  Inst.addOperand(MCOperand::createImm(Imm));
1488  }
1489 
1490  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1491  assert(N == 1 && "Invalid number of operands!");
1492  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1493  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1494  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1495  Inst.addOperand(MCOperand::createImm(Imm));
1496  }
1497 
1498  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1499  assert(N == 2 && "Invalid number of operands!");
1500  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1501  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1502  Inst.addOperand(MCOperand::createImm(IsSigned));
1503  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1504  }
1505 
1506  // For 8-bit load/store instructions with a register offset, both the
1507  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1508  // they're disambiguated by whether the shift was explicit or implicit rather
1509  // than its size.
1510  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1511  assert(N == 2 && "Invalid number of operands!");
1512  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1513  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1514  Inst.addOperand(MCOperand::createImm(IsSigned));
1515  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1516  }
1517 
1518  template<int Shift>
1519  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1520  assert(N == 1 && "Invalid number of operands!");
1521 
1522  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1523  uint64_t Value = CE->getValue();
1524  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1525  }
1526 
1527  template<int Shift>
1528  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1529  assert(N == 1 && "Invalid number of operands!");
1530 
1531  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1532  uint64_t Value = CE->getValue();
1533  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1534  }
1535 
1536  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1537  assert(N == 1 && "Invalid number of operands!");
1538  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1539  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1540  }
1541 
1542  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1543  assert(N == 1 && "Invalid number of operands!");
1544  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1545  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1546  }
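 // For example, an even rotation of #180 is encoded as 180 / 90 = 2, and an
 // odd rotation of #270 is encoded as (270 - 90) / 180 = 1.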
1547 
1548  void print(raw_ostream &OS) const override;
1549 
1550  static std::unique_ptr<AArch64Operand>
1551  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1552  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1553  Op->Tok.Data = Str.data();
1554  Op->Tok.Length = Str.size();
1555  Op->Tok.IsSuffix = IsSuffix;
1556  Op->StartLoc = S;
1557  Op->EndLoc = S;
1558  return Op;
1559  }
1560 
1561  static std::unique_ptr<AArch64Operand>
1562  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx) {
1563  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1564  Op->Reg.RegNum = RegNum;
1565  Op->Reg.Kind = Kind;
1566  Op->StartLoc = S;
1567  Op->EndLoc = E;
1568  return Op;
1569  }
1570 
1571  static std::unique_ptr<AArch64Operand>
1572  CreateReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1573  SMLoc S, SMLoc E, MCContext &Ctx) {
1574  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1575  Op->Reg.RegNum = RegNum;
1576  Op->Reg.ElementWidth = ElementWidth;
1577  Op->Reg.Kind = Kind;
1578  Op->StartLoc = S;
1579  Op->EndLoc = E;
1580  return Op;
1581  }
1582 
1583  static std::unique_ptr<AArch64Operand>
1584  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1585  char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1586  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1587  Op->VectorList.RegNum = RegNum;
1588  Op->VectorList.Count = Count;
1589  Op->VectorList.NumElements = NumElements;
1590  Op->VectorList.ElementKind = ElementKind;
1591  Op->StartLoc = S;
1592  Op->EndLoc = E;
1593  return Op;
1594  }
1595 
1596  static std::unique_ptr<AArch64Operand>
1597  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1598  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1599  Op->VectorIndex.Val = Idx;
1600  Op->StartLoc = S;
1601  Op->EndLoc = E;
1602  return Op;
1603  }
1604 
1605  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1606  SMLoc E, MCContext &Ctx) {
1607  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1608  Op->Imm.Val = Val;
1609  Op->StartLoc = S;
1610  Op->EndLoc = E;
1611  return Op;
1612  }
1613 
1614  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1615  unsigned ShiftAmount,
1616  SMLoc S, SMLoc E,
1617  MCContext &Ctx) {
1618  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1619  Op->ShiftedImm.Val = Val;
1620  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1621  Op->StartLoc = S;
1622  Op->EndLoc = E;
1623  return Op;
1624  }
1625 
1626  static std::unique_ptr<AArch64Operand>
1627  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1628  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1629  Op->CondCode.Code = Code;
1630  Op->StartLoc = S;
1631  Op->EndLoc = E;
1632  return Op;
1633  }
1634 
1635  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1636  MCContext &Ctx) {
1637  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1638  Op->FPImm.Val = Val;
1639  Op->StartLoc = S;
1640  Op->EndLoc = S;
1641  return Op;
1642  }
1643 
1644  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1645  StringRef Str,
1646  SMLoc S,
1647  MCContext &Ctx) {
1648  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1649  Op->Barrier.Val = Val;
1650  Op->Barrier.Data = Str.data();
1651  Op->Barrier.Length = Str.size();
1652  Op->StartLoc = S;
1653  Op->EndLoc = S;
1654  return Op;
1655  }
1656 
1657  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1658  uint32_t MRSReg,
1659  uint32_t MSRReg,
1660  uint32_t PStateField,
1661  MCContext &Ctx) {
1662  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1663  Op->SysReg.Data = Str.data();
1664  Op->SysReg.Length = Str.size();
1665  Op->SysReg.MRSReg = MRSReg;
1666  Op->SysReg.MSRReg = MSRReg;
1667  Op->SysReg.PStateField = PStateField;
1668  Op->StartLoc = S;
1669  Op->EndLoc = S;
1670  return Op;
1671  }
1672 
1673  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1674  SMLoc E, MCContext &Ctx) {
1675  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1676  Op->SysCRImm.Val = Val;
1677  Op->StartLoc = S;
1678  Op->EndLoc = E;
1679  return Op;
1680  }
1681 
1682  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1683  StringRef Str,
1684  SMLoc S,
1685  MCContext &Ctx) {
1686  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1687  Op->Prefetch.Val = Val;
1688  Op->Prefetch.Data = Str.data();
1689  Op->Prefetch.Length = Str.size();
1690  Op->StartLoc = S;
1691  Op->EndLoc = S;
1692  return Op;
1693  }
1694 
1695  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1696  StringRef Str,
1697  SMLoc S,
1698  MCContext &Ctx) {
1699  auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1700  Op->PSBHint.Val = Val;
1701  Op->PSBHint.Data = Str.data();
1702  Op->PSBHint.Length = Str.size();
1703  Op->StartLoc = S;
1704  Op->EndLoc = S;
1705  return Op;
1706  }
1707 
1708  static std::unique_ptr<AArch64Operand>
1709  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1710  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1711  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1712  Op->ShiftExtend.Type = ShOp;
1713  Op->ShiftExtend.Amount = Val;
1714  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1715  Op->StartLoc = S;
1716  Op->EndLoc = E;
1717  return Op;
1718  }
1719 };
1720 
1721 } // end anonymous namespace.
1722 
1723 void AArch64Operand::print(raw_ostream &OS) const {
1724  switch (Kind) {
1725  case k_FPImm:
1726  OS << "<fpimm " << getFPImm() << "("
1727  << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1728  break;
1729  case k_Barrier: {
1730  StringRef Name = getBarrierName();
1731  if (!Name.empty())
1732  OS << "<barrier " << Name << ">";
1733  else
1734  OS << "<barrier invalid #" << getBarrier() << ">";
1735  break;
1736  }
1737  case k_Immediate:
1738  OS << *getImm();
1739  break;
1740  case k_ShiftedImm: {
1741  unsigned Shift = getShiftedImmShift();
1742  OS << "<shiftedimm ";
1743  OS << *getShiftedImmVal();
1744  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1745  break;
1746  }
1747  case k_CondCode:
1748  OS << "<condcode " << getCondCode() << ">";
1749  break;
1750  case k_Register:
1751  OS << "<register " << getReg() << ">";
1752  break;
1753  case k_VectorList: {
1754  OS << "<vectorlist ";
1755  unsigned Reg = getVectorListStart();
1756  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1757  OS << Reg + i << " ";
1758  OS << ">";
1759  break;
1760  }
1761  case k_VectorIndex:
1762  OS << "<vectorindex " << getVectorIndex() << ">";
1763  break;
1764  case k_SysReg:
1765  OS << "<sysreg: " << getSysReg() << '>';
1766  break;
1767  case k_Token:
1768  OS << "'" << getToken() << "'";
1769  break;
1770  case k_SysCR:
1771  OS << "c" << getSysCR();
1772  break;
1773  case k_Prefetch: {
1774  StringRef Name = getPrefetchName();
1775  if (!Name.empty())
1776  OS << "<prfop " << Name << ">";
1777  else
1778  OS << "<prfop invalid #" << getPrefetch() << ">";
1779  break;
1780  }
1781  case k_PSBHint:
1782  OS << getPSBHintName();
1783  break;
1784  case k_ShiftExtend:
1785  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1786  << getShiftExtendAmount();
1787  if (!hasShiftExtendAmount())
1788  OS << "<imp>";
1789  OS << '>';
1790  break;
1791  }
1792 }
1793 
1794 /// @name Auto-generated Match Functions
1795 /// {
1796 
1797 static unsigned MatchRegisterName(StringRef Name);
1798 
1799 /// }
1800 
1801 static unsigned MatchNeonVectorRegName(StringRef Name) {
1802  return StringSwitch<unsigned>(Name.lower())
1803  .Case("v0", AArch64::Q0)
1804  .Case("v1", AArch64::Q1)
1805  .Case("v2", AArch64::Q2)
1806  .Case("v3", AArch64::Q3)
1807  .Case("v4", AArch64::Q4)
1808  .Case("v5", AArch64::Q5)
1809  .Case("v6", AArch64::Q6)
1810  .Case("v7", AArch64::Q7)
1811  .Case("v8", AArch64::Q8)
1812  .Case("v9", AArch64::Q9)
1813  .Case("v10", AArch64::Q10)
1814  .Case("v11", AArch64::Q11)
1815  .Case("v12", AArch64::Q12)
1816  .Case("v13", AArch64::Q13)
1817  .Case("v14", AArch64::Q14)
1818  .Case("v15", AArch64::Q15)
1819  .Case("v16", AArch64::Q16)
1820  .Case("v17", AArch64::Q17)
1821  .Case("v18", AArch64::Q18)
1822  .Case("v19", AArch64::Q19)
1823  .Case("v20", AArch64::Q20)
1824  .Case("v21", AArch64::Q21)
1825  .Case("v22", AArch64::Q22)
1826  .Case("v23", AArch64::Q23)
1827  .Case("v24", AArch64::Q24)
1828  .Case("v25", AArch64::Q25)
1829  .Case("v26", AArch64::Q26)
1830  .Case("v27", AArch64::Q27)
1831  .Case("v28", AArch64::Q28)
1832  .Case("v29", AArch64::Q29)
1833  .Case("v30", AArch64::Q30)
1834  .Case("v31", AArch64::Q31)
1835  .Default(0);
1836 }
1837 
1838 static bool isValidVectorKind(StringRef Name) {
1839  return StringSwitch<bool>(Name.lower())
1840  .Case(".8b", true)
1841  .Case(".16b", true)
1842  .Case(".4h", true)
1843  .Case(".8h", true)
1844  .Case(".2s", true)
1845  .Case(".4s", true)
1846  .Case(".1d", true)
1847  .Case(".2d", true)
1848  .Case(".1q", true)
1849  // Accept the width neutral ones, too, for verbose syntax. If those
1850  // aren't used in the right places, the token operand won't match so
1851  // all will work out.
1852  .Case(".b", true)
1853  .Case(".h", true)
1854  .Case(".s", true)
1855  .Case(".d", true)
1856  // Needed for fp16 scalar pairwise reductions
1857  .Case(".2h", true)
1858  // another special case for the ARMv8.2a dot product operand
1859  .Case(".4b", true)
1860  .Default(false);
1861 }
1862 
1863 static unsigned matchSVEDataVectorRegName(StringRef Name) {
1864  return StringSwitch<unsigned>(Name.lower())
1865  .Case("z0", AArch64::Z0)
1866  .Case("z1", AArch64::Z1)
1867  .Case("z2", AArch64::Z2)
1868  .Case("z3", AArch64::Z3)
1869  .Case("z4", AArch64::Z4)
1870  .Case("z5", AArch64::Z5)
1871  .Case("z6", AArch64::Z6)
1872  .Case("z7", AArch64::Z7)
1873  .Case("z8", AArch64::Z8)
1874  .Case("z9", AArch64::Z9)
1875  .Case("z10", AArch64::Z10)
1876  .Case("z11", AArch64::Z11)
1877  .Case("z12", AArch64::Z12)
1878  .Case("z13", AArch64::Z13)
1879  .Case("z14", AArch64::Z14)
1880  .Case("z15", AArch64::Z15)
1881  .Case("z16", AArch64::Z16)
1882  .Case("z17", AArch64::Z17)
1883  .Case("z18", AArch64::Z18)
1884  .Case("z19", AArch64::Z19)
1885  .Case("z20", AArch64::Z20)
1886  .Case("z21", AArch64::Z21)
1887  .Case("z22", AArch64::Z22)
1888  .Case("z23", AArch64::Z23)
1889  .Case("z24", AArch64::Z24)
1890  .Case("z25", AArch64::Z25)
1891  .Case("z26", AArch64::Z26)
1892  .Case("z27", AArch64::Z27)
1893  .Case("z28", AArch64::Z28)
1894  .Case("z29", AArch64::Z29)
1895  .Case("z30", AArch64::Z30)
1896  .Case("z31", AArch64::Z31)
1897  .Default(0);
1898 }
1899 
1900 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
1901  return StringSwitch<unsigned>(Name.lower())
1902  .Case("p0", AArch64::P0)
1903  .Case("p1", AArch64::P1)
1904  .Case("p2", AArch64::P2)
1905  .Case("p3", AArch64::P3)
1906  .Case("p4", AArch64::P4)
1907  .Case("p5", AArch64::P5)
1908  .Case("p6", AArch64::P6)
1909  .Case("p7", AArch64::P7)
1910  .Case("p8", AArch64::P8)
1911  .Case("p9", AArch64::P9)
1912  .Case("p10", AArch64::P10)
1913  .Case("p11", AArch64::P11)
1914  .Case("p12", AArch64::P12)
1915  .Case("p13", AArch64::P13)
1916  .Case("p14", AArch64::P14)
1917  .Case("p15", AArch64::P15)
1918  .Default(0);
1919 }
1920 
1921 static bool isValidSVEKind(StringRef Name) {
1922  return StringSwitch<bool>(Name.lower())
1923  .Case(".b", true)
1924  .Case(".h", true)
1925  .Case(".s", true)
1926  .Case(".d", true)
1927  .Case(".q", true)
1928  .Default(false);
1929 }
1930 
1931 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1932  char &ElementKind) {
1933  assert(isValidVectorKind(Name));
1934 
1935  ElementKind = Name.lower()[Name.size() - 1];
1936  NumElements = 0;
1937 
1938  if (Name.size() == 2)
1939  return;
1940 
1941  // Parse the lane count
1942  Name = Name.drop_front();
1943  while (isdigit(Name.front())) {
1944  NumElements = 10 * NumElements + (Name.front() - '0');
1945  Name = Name.drop_front();
1946  }
1947 }
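// For illustration, a suffix accepted by isValidVectorKind splits into a lane
// count and an element kind here; a width-neutral suffix leaves the count at
// zero. A minimal sketch of the expected results, using the function above:
//
//   unsigned NumElements; char ElementKind;
//   parseValidVectorKind(".4s", NumElements, ElementKind);  // 4, 's'
//   parseValidVectorKind(".16b", NumElements, ElementKind); // 16, 'b'
//   parseValidVectorKind(".d", NumElements, ElementKind);   // 0, 'd'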
1948 
1949 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1950  SMLoc &EndLoc) {
1951  StartLoc = getLoc();
1952  RegNo = tryParseRegister();
1953  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1954  return (RegNo == (unsigned)-1);
1955 }
1956 
1957 // Matches a register name or register alias previously defined by '.req'
1958 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1959  RegKind Kind) {
1960  unsigned RegNum = 0;
1961  if ((RegNum = matchSVEDataVectorRegName(Name)))
1962  return Kind == RegKind::SVEDataVector ? RegNum : 0;
1963 
1964  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
1965  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
1966 
1967  if ((RegNum = MatchNeonVectorRegName(Name)))
1968  return Kind == RegKind::NeonVector ? RegNum : 0;
1969 
1970  // The parsed register must be of RegKind Scalar
1971  if ((RegNum = MatchRegisterName(Name)))
1972  return Kind == RegKind::Scalar ? RegNum : 0;
1973 
1974  if (!RegNum) {
1975  // Check for aliases registered via .req. Canonicalize to lower case.
1976  // That's more consistent since register names are case insensitive, and
1977  // it's how the original entry was passed in from MC/MCParser/AsmParser.
1978  auto Entry = RegisterReqs.find(Name.lower());
1979  if (Entry == RegisterReqs.end())
1980  return 0;
1981 
1982  // set RegNum if the match is the right kind of register
1983  if (Kind == Entry->getValue().first)
1984  RegNum = Entry->getValue().second;
1985  }
1986  return RegNum;
1987 }
1988 
1989 /// tryParseRegister - Try to parse a register name. The token must be an
1990 /// Identifier when called, and if it is a register name the token is eaten
1991 /// and the register number is returned; otherwise -1 is returned.
1992 int AArch64AsmParser::tryParseRegister() {
1993  MCAsmParser &Parser = getParser();
1994  const AsmToken &Tok = Parser.getTok();
1995  if (Tok.isNot(AsmToken::Identifier))
1996  return -1;
1997 
1998  std::string lowerCase = Tok.getString().lower();
1999  unsigned RegNum = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2000 
2001  // Also handle a few aliases of registers.
2002  if (RegNum == 0)
2003  RegNum = StringSwitch<unsigned>(lowerCase)
2004  .Case("fp", AArch64::FP)
2005  .Case("lr", AArch64::LR)
2006  .Case("x31", AArch64::XZR)
2007  .Case("w31", AArch64::WZR)
2008  .Default(0);
2009 
2010  if (RegNum == 0)
2011  return -1;
2012 
2013  Parser.Lex(); // Eat identifier token.
2014  return RegNum;
2015 }
2016 
2017 /// tryMatchVectorRegister - Try to parse a vector register name with optional
2018 /// kind specifier. If it is a register specifier, eat the token and return it.
2019 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
2020  MCAsmParser &Parser = getParser();
2021  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2022  TokError("vector register expected");
2023  return -1;
2024  }
2025 
2026  StringRef Name = Parser.getTok().getString();
2027  // If there is a kind specifier, it's separated from the register name by
2028  // a '.'.
2029  size_t Start = 0, Next = Name.find('.');
2030  StringRef Head = Name.slice(Start, Next);
2031  unsigned RegNum = matchRegisterNameAlias(Head, RegKind::NeonVector);
2032 
2033  if (RegNum) {
2034  if (Next != StringRef::npos) {
2035  Kind = Name.slice(Next, StringRef::npos);
2036  if (!isValidVectorKind(Kind)) {
2037  TokError("invalid vector kind qualifier");
2038  return -1;
2039  }
2040  }
2041  Parser.Lex(); // Eat the register token.
2042  return RegNum;
2043  }
2044 
2045  if (expected)
2046  TokError("vector register expected");
2047  return -1;
2048 }
2049 
2050 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2051 OperandMatchResultTy
2052 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2053  MCAsmParser &Parser = getParser();
2054  SMLoc S = getLoc();
2055 
2056  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2057  Error(S, "Expected cN operand where 0 <= N <= 15");
2058  return MatchOperand_ParseFail;
2059  }
2060 
2061  StringRef Tok = Parser.getTok().getIdentifier();
2062  if (Tok[0] != 'c' && Tok[0] != 'C') {
2063  Error(S, "Expected cN operand where 0 <= N <= 15");
2064  return MatchOperand_ParseFail;
2065  }
2066 
2067  uint32_t CRNum;
2068  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2069  if (BadNum || CRNum > 15) {
2070  Error(S, "Expected cN operand where 0 <= N <= 15");
2071  return MatchOperand_ParseFail;
2072  }
2073 
2074  Parser.Lex(); // Eat identifier token.
2075  Operands.push_back(
2076  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2077  return MatchOperand_Success;
2078 }
2079 
2080 /// tryParsePrefetch - Try to parse a prefetch operand.
2081 OperandMatchResultTy
2082 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2083  MCAsmParser &Parser = getParser();
2084  SMLoc S = getLoc();
2085  const AsmToken &Tok = Parser.getTok();
2086  // Either an identifier for named values or a 5-bit immediate.
2087  // Eat optional hash.
2088  if (parseOptionalToken(AsmToken::Hash) ||
2089  Tok.is(AsmToken::Integer)) {
2090  const MCExpr *ImmVal;
2091  if (getParser().parseExpression(ImmVal))
2092  return MatchOperand_ParseFail;
2093 
2094  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2095  if (!MCE) {
2096  TokError("immediate value expected for prefetch operand");
2097  return MatchOperand_ParseFail;
2098  }
2099  unsigned prfop = MCE->getValue();
2100  if (prfop > 31) {
2101  TokError("prefetch operand out of range, [0,31] expected");
2102  return MatchOperand_ParseFail;
2103  }
2104 
2105  auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
2106  Operands.push_back(AArch64Operand::CreatePrefetch(
2107  prfop, PRFM ? PRFM->Name : "", S, getContext()));
2108  return MatchOperand_Success;
2109  }
2110 
2111  if (Tok.isNot(AsmToken::Identifier)) {
2112  TokError("pre-fetch hint expected");
2113  return MatchOperand_ParseFail;
2114  }
2115 
2116  auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
2117  if (!PRFM) {
2118  TokError("pre-fetch hint expected");
2119  return MatchOperand_ParseFail;
2120  }
2121 
2122  Parser.Lex(); // Eat identifier token.
2123  Operands.push_back(AArch64Operand::CreatePrefetch(
2124  PRFM->Encoding, Tok.getString(), S, getContext()));
2125  return MatchOperand_Success;
2126 }
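// For illustration: "prfm pldl1keep, [x0]" reaches this parser as the
// identifier "pldl1keep", which lookupPRFMByName maps to its 5-bit encoding
// (0 for pldl1keep); "prfm #5, [x0]" takes the immediate path and is only
// given a name if lookupPRFMByEncoding knows one for that value. Either way a
// single k_Prefetch operand carrying the encoding is produced.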
2127 
2128 /// tryParsePSBHint - Try to parse a PSB hint operand, which maps to the HINT instruction.
2129 OperandMatchResultTy
2130 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2131  MCAsmParser &Parser = getParser();
2132  SMLoc S = getLoc();
2133  const AsmToken &Tok = Parser.getTok();
2134  if (Tok.isNot(AsmToken::Identifier)) {
2135  TokError("invalid operand for instruction");
2136  return MatchOperand_ParseFail;
2137  }
2138 
2139  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2140  if (!PSB) {
2141  TokError("invalid operand for instruction");
2142  return MatchOperand_ParseFail;
2143  }
2144 
2145  Parser.Lex(); // Eat identifier token.
2146  Operands.push_back(AArch64Operand::CreatePSBHint(
2147  PSB->Encoding, Tok.getString(), S, getContext()));
2148  return MatchOperand_Success;
2149 }
2150 
2151 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2152 /// instruction.
2153 OperandMatchResultTy
2154 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2155  MCAsmParser &Parser = getParser();
2156  SMLoc S = getLoc();
2157  const MCExpr *Expr;
2158 
2159  if (Parser.getTok().is(AsmToken::Hash)) {
2160  Parser.Lex(); // Eat hash token.
2161  }
2162 
2163  if (parseSymbolicImmVal(Expr))
2164  return MatchOperand_ParseFail;
2165 
2166  AArch64MCExpr::VariantKind ELFRefKind;
2167  MCSymbolRefExpr::VariantKind DarwinRefKind;
2168  int64_t Addend;
2169  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2170  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2171  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2172  // No modifier was specified at all; this is the syntax for an ELF basic
2173  // ADRP relocation (unfortunately).
2174  Expr =
2175  AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2176  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2177  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2178  Addend != 0) {
2179  Error(S, "gotpage label reference not allowed an addend");
2180  return MatchOperand_ParseFail;
2181  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2182  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2183  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2184  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2185  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2186  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2187  // The operand must be an @page or @gotpage qualified symbolref.
2188  Error(S, "page or gotpage label reference expected");
2189  return MatchOperand_ParseFail;
2190  }
2191  }
2192 
2193  // We have either a label reference possibly with addend or an immediate. The
2194  // addend is a raw value here. The linker will adjust it to only reference the
2195  // page.
2196  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2197  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2198 
2199  return MatchOperand_Success;
2200 }
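// For illustration: "adrp x0, var" carries no modifier, so the expression is
// wrapped as VK_ABS_PAGE above (the plain ELF ADRP relocation); "adrp x0,
// :got:var" keeps its VK_GOT_PAGE modifier, and "adrp x0, var@PAGE" is the
// Darwin spelling. Other modifiers are rejected with the "page or gotpage
// label reference expected" diagnostic.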
2201 
2202 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2203 /// instruction.
2204 OperandMatchResultTy
2205 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2206  SMLoc S = getLoc();
2207  const MCExpr *Expr;
2208 
2209  parseOptionalToken(AsmToken::Hash);
2210  if (getParser().parseExpression(Expr))
2211  return MatchOperand_ParseFail;
2212 
2213  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2214  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2215 
2216  return MatchOperand_Success;
2217 }
2218 
2219 /// tryParseFPImm - A floating point immediate expression operand.
2220 OperandMatchResultTy
2221 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2222  MCAsmParser &Parser = getParser();
2223  SMLoc S = getLoc();
2224 
2225  bool Hash = parseOptionalToken(AsmToken::Hash);
2226 
2227  // Handle negation, as that still comes through as a separate token.
2228  bool isNegative = parseOptionalToken(AsmToken::Minus);
2229 
2230  const AsmToken &Tok = Parser.getTok();
2231  if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) {
2232  int64_t Val;
2233  if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) {
2234  Val = Tok.getIntVal();
2235  if (Val > 255 || Val < 0) {
2236  TokError("encoded floating point value out of range");
2237  return MatchOperand_ParseFail;
2238  }
2239  } else {
2240  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2241  if (isNegative)
2242  RealVal.changeSign();
2243 
2244  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2245  Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2246 
2247  // Check for out of range values. As an exception we let Zero through,
2248  // but as tokens instead of an FPImm so that it can be matched by the
2249  // appropriate alias if one exists.
2250  if (RealVal.isPosZero()) {
2251  Parser.Lex(); // Eat the token.
2252  Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext()));
2253  Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext()));
2254  return MatchOperand_Success;
2255  } else if (Val == -1) {
2256  TokError("expected compatible register or floating-point constant");
2257  return MatchOperand_ParseFail;
2258  }
2259  }
2260  Parser.Lex(); // Eat the token.
2261  Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2262  return MatchOperand_Success;
2263  }
2264 
2265  if (!Hash)
2266  return MatchOperand_NoMatch;
2267 
2268  TokError("invalid floating point immediate");
2269  return MatchOperand_ParseFail;
2270 }
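// For illustration (assuming the usual AArch64 8-bit float immediate
// encoding): "fmov d0, #1.0" goes through APFloat and AArch64_AM::getFP64Imm,
// yielding the encoded value 0x70, and "fmov d0, #0x70" supplies that encoding
// directly. "#0.0" is deliberately left as the tokens "#0" and ".0" so
// zero-register aliases can match, and a literal with no 8-bit encoding
// (e.g. "#0.3") is rejected here.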
2271 
2272 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2273 OperandMatchResultTy
2274 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2275  MCAsmParser &Parser = getParser();
2276  SMLoc S = getLoc();
2277 
2278  if (Parser.getTok().is(AsmToken::Hash))
2279  Parser.Lex(); // Eat '#'
2280  else if (Parser.getTok().isNot(AsmToken::Integer))
2281  // Operand should start with '#' or be an integer; otherwise, report no match.
2282  return MatchOperand_NoMatch;
2283 
2284  const MCExpr *Imm;
2285  if (parseSymbolicImmVal(Imm))
2286  return MatchOperand_ParseFail;
2287  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2288  uint64_t ShiftAmount = 0;
2289  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2290  if (MCE) {
2291  int64_t Val = MCE->getValue();
2292  if (Val > 0xfff && (Val & 0xfff) == 0) {
2293  Imm = MCConstantExpr::create(Val >> 12, getContext());
2294  ShiftAmount = 12;
2295  }
2296  }
2297  SMLoc E = Parser.getTok().getLoc();
2298  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2299  getContext()));
2300  return MatchOperand_Success;
2301  }
2302 
2303  // Eat ','
2304  Parser.Lex();
2305 
2306  // The optional operand must be "lsl #N" where N is non-negative.
2307  if (!Parser.getTok().is(AsmToken::Identifier) ||
2308  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2309  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2310  return MatchOperand_ParseFail;
2311  }
2312 
2313  // Eat 'lsl'
2314  Parser.Lex();
2315 
2316  parseOptionalToken(AsmToken::Hash);
2317 
2318  if (Parser.getTok().isNot(AsmToken::Integer)) {
2319  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2320  return MatchOperand_ParseFail;
2321  }
2322 
2323  int64_t ShiftAmount = Parser.getTok().getIntVal();
2324 
2325  if (ShiftAmount < 0) {
2326  Error(Parser.getTok().getLoc(), "positive shift amount required");
2327  return MatchOperand_ParseFail;
2328  }
2329  Parser.Lex(); // Eat the number
2330 
2331  SMLoc E = Parser.getTok().getLoc();
2332  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2333  S, E, getContext()));
2334  return MatchOperand_Success;
2335 }
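// For illustration: "add x0, x1, #4096" arrives as a plain constant; since
// 4096 > 0xfff and its low 12 bits are clear, it is rewritten above as the
// shifted immediate {#1, lsl #12}. "add x0, x1, #20, lsl #12" takes the
// explicit-shift path, and "#0x1001" is left unshifted for the matcher's
// range check to reject.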
2336 
2337 /// parseCondCodeString - Parse a Condition Code string.
2338 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2339  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2340  .Case("eq", AArch64CC::EQ)
2341  .Case("ne", AArch64CC::NE)
2342  .Case("cs", AArch64CC::HS)
2343  .Case("hs", AArch64CC::HS)
2344  .Case("cc", AArch64CC::LO)
2345  .Case("lo", AArch64CC::LO)
2346  .Case("mi", AArch64CC::MI)
2347  .Case("pl", AArch64CC::PL)
2348  .Case("vs", AArch64CC::VS)
2349  .Case("vc", AArch64CC::VC)
2350  .Case("hi", AArch64CC::HI)
2351  .Case("ls", AArch64CC::LS)
2352  .Case("ge", AArch64CC::GE)
2353  .Case("lt", AArch64CC::LT)
2354  .Case("gt", AArch64CC::GT)
2355  .Case("le", AArch64CC::LE)
2356  .Case("al", AArch64CC::AL)
2357  .Case("nv", AArch64CC::NV)
2358  .Default(AArch64CC::Invalid);
2359  return CC;
2360 }
2361 
2362 /// parseCondCode - Parse a Condition Code operand.
2363 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2364  bool invertCondCode) {
2365  MCAsmParser &Parser = getParser();
2366  SMLoc S = getLoc();
2367  const AsmToken &Tok = Parser.getTok();
2368  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2369 
2370  StringRef Cond = Tok.getString();
2371  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2372  if (CC == AArch64CC::Invalid)
2373  return TokError("invalid condition code");
2374  Parser.Lex(); // Eat identifier token.
2375 
2376  if (invertCondCode) {
2377  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2378  return TokError("condition codes AL and NV are invalid for this instruction");
2379  CC = AArch64CC::getInvertedCondCode(CC);
2380  }
2381 
2382  Operands.push_back(
2383  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2384  return false;
2385 }
2386 
2387 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
2388 /// argument. Parse them if present.
2389 OperandMatchResultTy
2390 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2391  MCAsmParser &Parser = getParser();
2392  const AsmToken &Tok = Parser.getTok();
2393  std::string LowerID = Tok.getString().lower();
2394  AArch64_AM::ShiftExtendType ShOp =
2395  StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2396  .Case("lsl", AArch64_AM::LSL)
2397  .Case("lsr", AArch64_AM::LSR)
2398  .Case("asr", AArch64_AM::ASR)
2399  .Case("ror", AArch64_AM::ROR)
2400  .Case("msl", AArch64_AM::MSL)
2401  .Case("uxtb", AArch64_AM::UXTB)
2402  .Case("uxth", AArch64_AM::UXTH)
2403  .Case("uxtw", AArch64_AM::UXTW)
2404  .Case("uxtx", AArch64_AM::UXTX)
2405  .Case("sxtb", AArch64_AM::SXTB)
2406  .Case("sxth", AArch64_AM::SXTH)
2407  .Case("sxtw", AArch64_AM::SXTW)
2408  .Case("sxtx", AArch64_AM::SXTX)
2409  .Default(AArch64_AM::InvalidShiftExtend);
2410 
2411  if (ShOp == AArch64_AM::InvalidShiftExtend)
2412  return MatchOperand_NoMatch;
2413 
2414  SMLoc S = Tok.getLoc();
2415  Parser.Lex();
2416 
2417  bool Hash = parseOptionalToken(AsmToken::Hash);
2418 
2419  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2420  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2421  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2422  ShOp == AArch64_AM::MSL) {
2423  // We expect a number here.
2424  TokError("expected #imm after shift specifier");
2425  return MatchOperand_ParseFail;
2426  }
2427 
2428  // "extend" type operations don't need an immediate, #0 is implicit.
2429  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2430  Operands.push_back(
2431  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2432  return MatchOperand_Success;
2433  }
2434 
2435  // Make sure we do actually have a number, identifier or a parenthesized
2436  // expression.
2437  SMLoc E = Parser.getTok().getLoc();
2438  if (!Parser.getTok().is(AsmToken::Integer) &&
2439  !Parser.getTok().is(AsmToken::LParen) &&
2440  !Parser.getTok().is(AsmToken::Identifier)) {
2441  Error(E, "expected integer shift amount");
2442  return MatchOperand_ParseFail;
2443  }
2444 
2445  const MCExpr *ImmVal;
2446  if (getParser().parseExpression(ImmVal))
2447  return MatchOperand_ParseFail;
2448 
2449  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2450  if (!MCE) {
2451  Error(E, "expected constant '#imm' after shift specifier");
2452  return MatchOperand_ParseFail;
2453  }
2454 
2455  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2456  Operands.push_back(AArch64Operand::CreateShiftExtend(
2457  ShOp, MCE->getValue(), true, S, E, getContext()));
2458  return MatchOperand_Success;
2459 }
2460 
2461 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2462  if (FBS[AArch64::HasV8_1aOps])
2463  Str += "ARMv8.1a";
2464  else if (FBS[AArch64::HasV8_2aOps])
2465  Str += "ARMv8.2a";
2466  else
2467  Str += "(unknown)";
2468 }
2469 
2470 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2471  SMLoc S) {
2472  const uint16_t Op2 = Encoding & 7;
2473  const uint16_t Cm = (Encoding & 0x78) >> 3;
2474  const uint16_t Cn = (Encoding & 0x780) >> 7;
2475  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2476 
2477  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2478 
2479  Operands.push_back(
2480  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2481  Operands.push_back(
2482  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2483  Operands.push_back(
2484  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2485  Expr = MCConstantExpr::create(Op2, getContext());
2486  Operands.push_back(
2487  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2488 }
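// A minimal standalone sketch of the same field split, to make the bit layout
// of the 14-bit alias encoding explicit. The struct and helper names here are
// illustrative only and are not part of this parser:
struct SysAliasFields { uint16_t Op1, Cn, Cm, Op2; };
static inline SysAliasFields decodeSysAliasEncoding(uint16_t Encoding) {
  return {uint16_t((Encoding >> 11) & 0x7), // op1: bits [13:11]
          uint16_t((Encoding >> 7) & 0xF),  // CRn: bits [10:7]
          uint16_t((Encoding >> 3) & 0xF),  // CRm: bits [6:3]
          uint16_t(Encoding & 0x7)};        // op2: bits [2:0]
}
// For example, decodeSysAliasEncoding(0x388) yields {0, 7, 1, 0}, i.e. the
// operand sequence "#0, c7, c1, #0" for the emitted SYS instruction.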
2489 
2490 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2491 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2492 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2493  OperandVector &Operands) {
2494  if (Name.find('.') != StringRef::npos)
2495  return TokError("invalid operand");
2496 
2497  Mnemonic = Name;
2498  Operands.push_back(
2499  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2500 
2501  MCAsmParser &Parser = getParser();
2502  const AsmToken &Tok = Parser.getTok();
2503  StringRef Op = Tok.getString();
2504  SMLoc S = Tok.getLoc();
2505 
2506  if (Mnemonic == "ic") {
2507  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2508  if (!IC)
2509  return TokError("invalid operand for IC instruction");
2510  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2511  std::string Str("IC " + std::string(IC->Name) + " requires ");
2512  setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2513  return TokError(Str.c_str());
2514  }
2515  createSysAlias(IC->Encoding, Operands, S);
2516  } else if (Mnemonic == "dc") {
2517  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2518  if (!DC)
2519  return TokError("invalid operand for DC instruction");
2520  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2521  std::string Str("DC " + std::string(DC->Name) + " requires ");
2522  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2523  return TokError(Str.c_str());
2524  }
2525  createSysAlias(DC->Encoding, Operands, S);
2526  } else if (Mnemonic == "at") {
2527  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2528  if (!AT)
2529  return TokError("invalid operand for AT instruction");
2530  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2531  std::string Str("AT " + std::string(AT->Name) + " requires ");
2532  setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2533  return TokError(Str.c_str());
2534  }
2535  createSysAlias(AT->Encoding, Operands, S);
2536  } else if (Mnemonic == "tlbi") {
2537  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2538  if (!TLBI)
2539  return TokError("invalid operand for TLBI instruction");
2540  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2541  std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2542  setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2543  return TokError(Str.c_str());
2544  }
2545  createSysAlias(TLBI->Encoding, Operands, S);
2546  }
2547 
2548  Parser.Lex(); // Eat operand.
2549 
2550  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2551  bool HasRegister = false;
2552 
2553  // Check for the optional register operand.
2554  if (parseOptionalToken(AsmToken::Comma)) {
2555  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2556  return TokError("expected register operand");
2557  HasRegister = true;
2558  }
2559 
2560  if (ExpectRegister && !HasRegister)
2561  return TokError("specified " + Mnemonic + " op requires a register");
2562  else if (!ExpectRegister && HasRegister)
2563  return TokError("specified " + Mnemonic + " op does not use a register");
2564 
2565  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2566  return true;
2567 
2568  return false;
2569 }
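// For illustration: "tlbi vae1, x0" resolves "vae1" through lookupTLBIByName
// and, because the op name does not contain "all", requires the trailing
// register; "tlbi vmalle1" and "ic iallu" contain "all" and must not take one.
// Either mismatch is diagnosed above and the statement is rejected.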
2570 
2571 OperandMatchResultTy
2572 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2573  MCAsmParser &Parser = getParser();
2574  const AsmToken &Tok = Parser.getTok();
2575 
2576  // Can be either a #imm style literal or an option name
2577  if (parseOptionalToken(AsmToken::Hash) ||
2578  Tok.is(AsmToken::Integer)) {
2579  // Immediate operand.
2580  const MCExpr *ImmVal;
2581  SMLoc ExprLoc = getLoc();
2582  if (getParser().parseExpression(ImmVal))
2583  return MatchOperand_ParseFail;
2584  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2585  if (!MCE) {
2586  Error(ExprLoc, "immediate value expected for barrier operand");
2587  return MatchOperand_ParseFail;
2588  }
2589  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2590  Error(ExprLoc, "barrier operand out of range");
2591  return MatchOperand_ParseFail;
2592  }
2593  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2594  Operands.push_back(AArch64Operand::CreateBarrier(
2595  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2596  return MatchOperand_Success;
2597  }
2598 
2599  if (Tok.isNot(AsmToken::Identifier)) {
2600  TokError("invalid operand for instruction");
2601  return MatchOperand_ParseFail;
2602  }
2603 
2604  // The only valid named option for ISB is 'sy'
2605  auto DB = AArch64DB::lookupDBByName(Tok.getString());
2606  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2607  TokError("'sy' or #imm operand expected");
2608  return MatchOperand_ParseFail;
2609  } else if (!DB) {
2610  TokError("invalid barrier option name");
2611  return MatchOperand_ParseFail;
2612  }
2613 
2614  Operands.push_back(AArch64Operand::CreateBarrier(
2615  DB->Encoding, Tok.getString(), getLoc(), getContext()));
2616  Parser.Lex(); // Consume the option
2617 
2618  return MatchOperand_Success;
2619 }
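// For illustration: "dmb ish" resolves the named option via lookupDBByName and
// records its 4-bit encoding (11 for ish), "dmb #11" supplies the encoding
// numerically, and "isb" accepts only "sy" or an immediate, as checked above.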
2620 
2621 OperandMatchResultTy
2622 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2623  MCAsmParser &Parser = getParser();
2624  const AsmToken &Tok = Parser.getTok();
2625 
2626  if (Tok.isNot(AsmToken::Identifier))
2627  return MatchOperand_NoMatch;
2628 
2629  int MRSReg, MSRReg;
2630  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2631  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2632  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2633  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2634  } else
2635  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2636 
2637  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2638  unsigned PStateImm = -1;
2639  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2640  PStateImm = PState->Encoding;
2641 
2642  Operands.push_back(
2643  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2644  PStateImm, getContext()));
2645  Parser.Lex(); // Eat identifier
2646 
2647  return MatchOperand_Success;
2648 }
2649 
2650 /// tryParseNeonVectorRegister - Parse a vector register operand.
2651 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2652  MCAsmParser &Parser = getParser();
2653  if (Parser.getTok().isNot(AsmToken::Identifier))
2654  return true;
2655 
2656  SMLoc S = getLoc();
2657  // Check for a vector register specifier first.
2658  StringRef Kind;
2659  int64_t Reg = tryMatchVectorRegister(Kind, false);
2660  if (Reg == -1)
2661  return true;
2662  Operands.push_back(
2663  AArch64Operand::CreateReg(Reg, RegKind::NeonVector, S, getLoc(),
2664  getContext()));
2665 
2666  // If there was an explicit qualifier, that goes on as a literal text
2667  // operand.
2668  if (!Kind.empty())
2669  Operands.push_back(
2670  AArch64Operand::CreateToken(Kind, false, S, getContext()));
2671 
2672  // If there is an index specifier following the register, parse that too.
2673  SMLoc SIdx = getLoc();
2674  if (parseOptionalToken(AsmToken::LBrac)) {
2675  const MCExpr *ImmVal;
2676  if (getParser().parseExpression(ImmVal))
2677  return false;
2678  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2679  if (!MCE) {
2680  TokError("immediate value expected for vector index");
2681  return false;
2682  }
2683 
2684  SMLoc E = getLoc();
2685 
2686  if (parseToken(AsmToken::RBrac, "']' expected"))
2687  return false;
2688 
2689  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2690  E, getContext()));
2691  }
2692 
2693  return false;
2694 }
2695 
2696 // tryParseSVERegister - Try to parse an SVE vector register name with an
2697 // optional kind specifier. If it is a register specifier, eat the token
2698 // and return it.
2699 OperandMatchResultTy
2700 AArch64AsmParser::tryParseSVERegister(int &Reg, StringRef &Kind,
2701  RegKind MatchKind) {
2702  MCAsmParser &Parser = getParser();
2703  const AsmToken &Tok = Parser.getTok();
2704 
2705  if (Tok.isNot(AsmToken::Identifier))
2706  return MatchOperand_NoMatch;
2707 
2708  StringRef Name = Tok.getString();
2709  // If there is a kind specifier, it's separated from the register name by
2710  // a '.'.
2711  size_t Start = 0, Next = Name.find('.');
2712  StringRef Head = Name.slice(Start, Next);
2713  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
2714 
2715  if (RegNum) {
2716  if (Next != StringRef::npos) {
2717  Kind = Name.slice(Next, StringRef::npos);
2718  if (!isValidSVEKind(Kind)) {
2719  TokError("invalid sve vector kind qualifier");
2720  return MatchOperand_ParseFail;
2721  }
2722  }
2723  Parser.Lex(); // Eat the register token.
2724 
2725  Reg = RegNum;
2726  return MatchOperand_Success;
2727  }
2728 
2729  return MatchOperand_NoMatch;
2730 }
2731 
2732 /// tryParseSVEPredicateVector - Parse an SVE predicate register operand.
2733 OperandMatchResultTy
2734 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
2735  // Check for a SVE predicate register specifier first.
2736  const SMLoc S = getLoc();
2737  StringRef Kind;
2738  int RegNum = -1;
2739  auto Res = tryParseSVERegister(RegNum, Kind, RegKind::SVEPredicateVector);
2740  if (Res != MatchOperand_Success)
2741  return Res;
2742 
2743  unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
2744  .Case("", -1)
2745  .Case(".b", 8)
2746  .Case(".h", 16)
2747  .Case(".s", 32)
2748  .Case(".d", 64)
2749  .Case(".q", 128)
2750  .Default(0);
2751 
2752  if (!ElementWidth)
2753  return MatchOperand_NoMatch;
2754 
2755  Operands.push_back(
2756  AArch64Operand::CreateReg(RegNum, RegKind::SVEPredicateVector,
2757  ElementWidth, S, getLoc(), getContext()));
2758 
2759  // Not all predicates are followed by a '/m' or '/z'.
2760  MCAsmParser &Parser = getParser();
2761  if (Parser.getTok().isNot(AsmToken::Slash))
2762  return MatchOperand_Success;
2763 
2764  // But when they do they shouldn't have an element type suffix.
2765  if (!Kind.empty()) {
2766  Error(S, "not expecting size suffix");
2767  return MatchOperand_ParseFail;
2768  }
2769 
2770  // Add a literal slash as operand
2771  Operands.push_back(
2772  AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
2773 
2774  Parser.Lex(); // Eat the slash.
2775 
2776  // Zeroing or merging?
2777  auto Pred = Parser.getTok().getString().lower();
2778  if (Pred != "z" && Pred != "m") {
2779  Error(getLoc(), "expecting 'm' or 'z' predication");
2780  return MatchOperand_ParseFail;
2781  }
2782 
2783  // Add zero/merge token.
2784  const char *ZM = Pred == "z" ? "z" : "m";
2785  Operands.push_back(
2786  AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
2787 
2788  Parser.Lex(); // Eat zero/merge token.
2789  return MatchOperand_Success;
2790 }
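// For illustration: "p0.s" becomes a single predicate register operand with a
// 32-bit element width, while "p0/z" (a size suffix is not allowed before the
// slash) becomes three operands: the p0 register, a literal "/" token and a
// "z" token, which the matcher consumes as part of the zeroing-predicate
// syntax.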
2791 
2792 /// parseRegister - Parse a non-vector register operand.
2793 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2794  SMLoc S = getLoc();
2795  // Try for a vector (neon) register.
2796  if (!tryParseNeonVectorRegister(Operands))
2797  return false;
2798 
2799  // Try for a scalar register.
2800  int64_t Reg = tryParseRegister();
2801  if (Reg == -1)
2802  return true;
2803  Operands.push_back(AArch64Operand::CreateReg(Reg, RegKind::Scalar, S,
2804  getLoc(), getContext()));
2805 
2806  return false;
2807 }
2808 
2809 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2810  MCAsmParser &Parser = getParser();
2811  bool HasELFModifier = false;
2812  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
2813 
2814  if (parseOptionalToken(AsmToken::Colon)) {
2815  HasELFModifier = true;
2816 
2817  if (Parser.getTok().isNot(AsmToken::Identifier))
2818  return TokError("expect relocation specifier in operand after ':'");
2819 
2820  std::string LowerCase = Parser.getTok().getIdentifier().lower();
2821  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2822  .Case("lo12", AArch64MCExpr::VK_LO12)
2823  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2824  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2825  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2826  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2827  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2828  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2829  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2830  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2831  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2832  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2833  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2834  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2835  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2836  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2837  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2838  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2839  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2840  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2841  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2842  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2843  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2844  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2845  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2846  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2847  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2848  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2849  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2850  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2851  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2852  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2853  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2854  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2855  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2856  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2857  .Default(AArch64MCExpr::VK_INVALID);
2858 
2859  if (RefKind == AArch64MCExpr::VK_INVALID)
2860  return TokError("expect relocation specifier in operand after ':'");
2861 
2862  Parser.Lex(); // Eat identifier
2863 
2864  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
2865  return true;
2866  }
2867 
2868  if (getParser().parseExpression(ImmVal))
2869  return true;
2870 
2871  if (HasELFModifier)
2872  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2873 
2874  return false;
2875 }
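// For illustration: in "add x0, x0, :lo12:var" the ":lo12:" prefix is consumed
// here, the remaining expression "var" is parsed normally, and the result is
// wrapped in an AArch64MCExpr with VK_LO12 so the relocation machinery sees
// the modifier; a plain "add x0, x0, #1" skips the wrapping entirely.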
2876 
2877 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2878 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2879  MCAsmParser &Parser = getParser();
2880  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a '{'");
2881  SMLoc S = getLoc();
2882  Parser.Lex(); // Eat the '{' token.
2883  StringRef Kind;
2884  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2885  if (FirstReg == -1)
2886  return true;
2887  int64_t PrevReg = FirstReg;
2888  unsigned Count = 1;
2889 
2890  if (parseOptionalToken(AsmToken::Minus)) {
2891  SMLoc Loc = getLoc();
2892  StringRef NextKind;
2893  int64_t Reg = tryMatchVectorRegister(NextKind, true);
2894  if (Reg == -1)
2895  return true;
2896  // Any kind suffixes must match on all registers in the list.
2897  if (Kind != NextKind)
2898  return Error(Loc, "mismatched register size suffix");
2899 
2900  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2901 
2902  if (Space == 0 || Space > 3) {
2903  return Error(Loc, "invalid number of vectors");
2904  }
2905 
2906  Count += Space;
2907  }
2908  else {
2909  while (parseOptionalToken(AsmToken::Comma)) {
2910  SMLoc Loc = getLoc();
2911  StringRef NextKind;
2912  int64_t Reg = tryMatchVectorRegister(NextKind, true);
2913  if (Reg == -1)
2914  return true;
2915  // Any kind suffixes must match on all registers in the list.
2916  if (Kind != NextKind)
2917  return Error(Loc, "mismatched register size suffix");
2918 
2919  // Registers must be incremental (with wraparound at 31)
2920  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2921  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2922  return Error(Loc, "registers must be sequential");
2923 
2924  PrevReg = Reg;
2925  ++Count;
2926  }
2927  }
2928 
2929  if (parseToken(AsmToken::RCurly, "'}' expected"))
2930  return true;
2931 
2932  if (Count > 4)
2933  return Error(S, "invalid number of vectors");
2934 
2935  unsigned NumElements = 0;
2936  char ElementKind = 0;
2937  if (!Kind.empty())
2938  parseValidVectorKind(Kind, NumElements, ElementKind);
2939 
2940  Operands.push_back(AArch64Operand::CreateVectorList(
2941  FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2942 
2943  // If there is an index specifier following the list, parse that too.
2944  SMLoc SIdx = getLoc();
2945  if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token.
2946  const MCExpr *ImmVal;
2947  if (getParser().parseExpression(ImmVal))
2948  return false;
2949  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2950  if (!MCE) {
2951  TokError("immediate value expected for vector index");
2952  return false;
2953  }
2954 
2955  SMLoc E = getLoc();
2956  if (parseToken(AsmToken::RBrac, "']' expected"))
2957  return false;
2958 
2959  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2960  E, getContext()));
2961  }
2962  return false;
2963 }
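// For illustration: "{ v0.4s - v3.4s }" parses as a range (Space == 3, so
// Count == 4), "{ v0.4s, v1.4s }" as an explicit list of sequential registers,
// and "{ v31.8b, v0.8b }" is accepted because the register numbering wraps at
// 31. The whole group becomes one k_VectorList operand (FirstReg, Count,
// NumElements, ElementKind), optionally followed by a vector index in
// brackets.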
2964 
2965 OperandMatchResultTy
2966 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2967  MCAsmParser &Parser = getParser();
2968  const AsmToken &Tok = Parser.getTok();
2969  if (!Tok.is(AsmToken::Identifier))
2970  return MatchOperand_NoMatch;
2971 
2972  unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), RegKind::Scalar);
2973 
2974  MCContext &Ctx = getContext();
2975  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2976  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2977  return MatchOperand_NoMatch;
2978 
2979  SMLoc S = getLoc();
2980  Parser.Lex(); // Eat register
2981 
2982  if (!parseOptionalToken(AsmToken::Comma)) {
2983  Operands.push_back(
2984  AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
2985  return MatchOperand_Success;
2986  }
2987 
2988  parseOptionalToken(AsmToken::Hash);
2989 
2990  if (Parser.getTok().isNot(AsmToken::Integer)) {
2991  Error(getLoc(), "index must be absent or #0");
2992  return MatchOperand_ParseFail;
2993  }
2994 
2995  const MCExpr *ImmVal;
2996  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2997  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2998  Error(getLoc(), "index must be absent or #0");
2999  return MatchOperand_ParseFail;
3000  }
3001 
3002  Operands.push_back(
3003  AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
3004  return MatchOperand_Success;
3005 }
3006 
3007 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
3008 /// operand regardless of the mnemonic.
3009 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3010  bool invertCondCode) {
3011  MCAsmParser &Parser = getParser();
3012 
3013  OperandMatchResultTy ResTy =
3014  MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3015 
3016  // Check if the current operand has a custom associated parser, if so, try to
3017  // custom parse the operand, or fallback to the general approach.
3018  if (ResTy == MatchOperand_Success)
3019  return false;
3020  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3021  // there was a match, but an error occurred, in which case, just return that
3022  // the operand parsing failed.
3023  if (ResTy == MatchOperand_ParseFail)
3024  return true;
3025 
3026  // Nothing custom, so do general case parsing.
3027  SMLoc S, E;
3028  switch (getLexer().getKind()) {
3029  default: {
3030  SMLoc S = getLoc();
3031  const MCExpr *Expr;
3032  if (parseSymbolicImmVal(Expr))
3033  return Error(S, "invalid operand");
3034 
3035  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3036  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3037  return false;
3038  }
3039  case AsmToken::LBrac: {
3040  SMLoc Loc = Parser.getTok().getLoc();
3041  Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3042  getContext()));
3043  Parser.Lex(); // Eat '['
3044 
3045  // There's no comma after a '[', so we can parse the next operand
3046  // immediately.
3047  return parseOperand(Operands, false, false);
3048  }
3049  case AsmToken::LCurly:
3050  return parseVectorList(Operands);
3051  case AsmToken::Identifier: {
3052  // If we're expecting a Condition Code operand, then just parse that.
3053  if (isCondCode)
3054  return parseCondCode(Operands, invertCondCode);
3055 
3056  // If it's a register name, parse it.
3057  if (!parseRegister(Operands))
3058  return false;
3059 
3060  // This could be an optional "shift" or "extend" operand.
3061  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3062  // We can only continue if no tokens were eaten.
3063  if (GotShift != MatchOperand_NoMatch)
3064  return GotShift;
3065 
3066  // This was not a register so parse other operands that start with an
3067  // identifier (like labels) as expressions and create them as immediates.
3068  const MCExpr *IdVal;
3069  S = getLoc();
3070  if (getParser().parseExpression(IdVal))
3071  return true;
3072  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3073  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3074  return false;
3075  }
3076  case AsmToken::Integer:
3077  case AsmToken::Real:
3078  case AsmToken::Hash: {
3079  // #42 -> immediate.
3080  S = getLoc();
3081 
3082  parseOptionalToken(AsmToken::Hash);
3083 
3084  // Parse a negative sign
3085  bool isNegative = false;
3086  if (Parser.getTok().is(AsmToken::Minus)) {
3087  isNegative = true;
3088  // We need to consume this token only when we have a Real, otherwise
3089  // we let parseSymbolicImmVal take care of it
3090  if (Parser.getLexer().peekTok().is(AsmToken::Real))
3091  Parser.Lex();
3092  }
3093 
3094  // The only Real that should come through here is a literal #0.0 for
3095  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3096  // so convert the value.
3097  const AsmToken &Tok = Parser.getTok();
3098  if (Tok.is(AsmToken::Real)) {
3099  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3100  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3101  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3102  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3103  Mnemonic != "fcmlt")
3104  return TokError("unexpected floating point literal");
3105  else if (IntVal != 0 || isNegative)
3106  return TokError("expected floating-point constant #0.0");
3107  Parser.Lex(); // Eat the token.
3108 
3109  Operands.push_back(
3110  AArch64Operand::CreateToken("#0", false, S, getContext()));
3111  Operands.push_back(
3112  AArch64Operand::CreateToken(".0", false, S, getContext()));
3113  return false;
3114  }
3115 
3116  const MCExpr *ImmVal;
3117  if (parseSymbolicImmVal(ImmVal))
3118  return true;
3119 
3120  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3121  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3122  return false;
3123  }
3124  case AsmToken::Equal: {
3125  SMLoc Loc = getLoc();
3126  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val)
3127  return TokError("unexpected token in operand");
3128  Parser.Lex(); // Eat '='
3129  const MCExpr *SubExprVal;
3130  if (getParser().parseExpression(SubExprVal))
3131  return true;
3132 
3133  if (Operands.size() < 2 ||
3134  !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3135  return Error(Loc, "Only valid when first operand is register");
3136 
3137  bool IsXReg =
3138  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3139  Operands[1]->getReg());
3140 
3141  MCContext& Ctx = getContext();
3142  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3143  // If the op is an imm and can fit into a mov, then replace ldr with mov.
3144  if (isa<MCConstantExpr>(SubExprVal)) {
3145  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3146  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3147  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3148  ShiftAmt += 16;
3149  Imm >>= 16;
3150  }
3151  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3152  Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3153  Operands.push_back(AArch64Operand::CreateImm(
3154  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3155  if (ShiftAmt)
3156  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3157  ShiftAmt, true, S, E, Ctx));
3158  return false;
3159  }
3160  APInt Simm = APInt(64, Imm << ShiftAmt);
3161  // check if the immediate is an unsigned or signed 32-bit int for W regs
3162  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3163  return Error(Loc, "Immediate too large for register");
3164  }
3165  // If it is a label or an imm that cannot fit in a movz, put it into the constant pool.
3166  const MCExpr *CPLoc =
3167  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3168  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3169  return false;
3170  }
3171  }
3172 }
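// For illustration of the "ldr Rt, =imm" pseudo handled above: "ldr x0,
// =0x10000" has a constant that fits a move once shifted, so the mnemonic is
// rewritten to "movz" with the operands {x0, #1, lsl #16}; "ldr x0, =sym" (or
// a constant too wide for a movz) instead gets a constant-pool entry through
// addConstantPoolEntry and is left as a literal load.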
3173 
3174 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3175 /// operands.
3176 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3177  StringRef Name, SMLoc NameLoc,
3178  OperandVector &Operands) {
3179  MCAsmParser &Parser = getParser();
3180  Name = StringSwitch<StringRef>(Name.lower())
3181  .Case("beq", "b.eq")
3182  .Case("bne", "b.ne")
3183  .Case("bhs", "b.hs")
3184  .Case("bcs", "b.cs")
3185  .Case("blo", "b.lo")
3186  .Case("bcc", "b.cc")
3187  .Case("bmi", "b.mi")
3188  .Case("bpl", "b.pl")
3189  .Case("bvs", "b.vs")
3190  .Case("bvc", "b.vc")
3191  .Case("bhi", "b.hi")
3192  .Case("bls", "b.ls")
3193  .Case("bge", "b.ge")
3194  .Case("blt", "b.lt")
3195  .Case("bgt", "b.gt")
3196  .Case("ble", "b.le")
3197  .Case("bal", "b.al")
3198  .Case("bnv", "b.nv")
3199  .Default(Name);
3200 
3201  // First check for the AArch64-specific .req directive.
3202  if (Parser.getTok().is(AsmToken::Identifier) &&
3203  Parser.getTok().getIdentifier() == ".req") {
3204  parseDirectiveReq(Name, NameLoc);
3205  // We always return 'error' for this, as we're done with this
3206  // statement and don't need to match the instruction.
3207  return true;
3208  }
3209 
3210  // Create the leading tokens for the mnemonic, split by '.' characters.
3211  size_t Start = 0, Next = Name.find('.');
3212  StringRef Head = Name.slice(Start, Next);
3213 
3214  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3215  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3216  return parseSysAlias(Head, NameLoc, Operands);
3217 
3218  Operands.push_back(
3219  AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3220  Mnemonic = Head;
3221 
3222  // Handle condition codes for a branch mnemonic
3223  if (Head == "b" && Next != StringRef::npos) {
3224  Start = Next;
3225  Next = Name.find('.', Start + 1);
3226  Head = Name.slice(Start + 1, Next);
3227 
3228  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3229  (Head.data() - Name.data()));
3230  AArch64CC::CondCode CC = parseCondCodeString(Head);
3231  if (CC == AArch64CC::Invalid)
3232  return Error(SuffixLoc, "invalid condition code");
3233  Operands.push_back(
3234  AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3235  Operands.push_back(
3236  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3237  }
3238 
3239  // Add the remaining tokens in the mnemonic.
3240  while (Next != StringRef::npos) {
3241  Start = Next;
3242  Next = Name.find('.', Start + 1);
3243  Head = Name.slice(Start, Next);
3244  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3245  (Head.data() - Name.data()) + 1);
3246  Operands.push_back(
3247  AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3248  }
3249 
3250  // Conditional compare instructions have a Condition Code operand, which needs
3251  // to be parsed and an immediate operand created.
3252  bool condCodeFourthOperand =
3253  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3254  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3255  Head == "csinc" || Head == "csinv" || Head == "csneg");
3256 
3257  // These instructions are aliases to some of the conditional select
3258  // instructions. However, the condition code is inverted in the aliased
3259  // instruction.
3260  //
3261  // FIXME: Is this the correct way to handle these? Or should the parser
3262  // generate the aliased instructions directly?
3263  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3264  bool condCodeThirdOperand =
3265  (Head == "cinc" || Head == "cinv" || Head == "cneg");
3266 
3267  // Read the remaining operands.
3268  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3269  // Read the first operand.
3270  if (parseOperand(Operands, false, false)) {
3271  return true;
3272  }
3273 
3274  unsigned N = 2;
3275  while (parseOptionalToken(AsmToken::Comma)) {
3276  // Parse and remember the operand.
3277  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3278  (N == 3 && condCodeThirdOperand) ||
3279  (N == 2 && condCodeSecondOperand),
3280  condCodeSecondOperand || condCodeThirdOperand)) {
3281  return true;
3282  }
3283 
3284  // After successfully parsing some operands there are two special cases to
3285  // consider (i.e. notional operands not separated by commas). Both are due
3286  // to memory specifiers:
3287  // + An RBrac will end an address for load/store/prefetch
3288  // + An '!' will indicate a pre-indexed operation.
3289  //
3290  // It's someone else's responsibility to make sure these tokens are sane
3291  // in the given context!
3292 
3293  SMLoc RLoc = Parser.getTok().getLoc();
3294  if (parseOptionalToken(AsmToken::RBrac))
3295  Operands.push_back(
3296  AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3297  SMLoc ELoc = Parser.getTok().getLoc();
3298  if (parseOptionalToken(AsmToken::Exclaim))
3299  Operands.push_back(
3300  AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3301 
3302  ++N;
3303  }
3304  }
3305 
3306  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3307  return true;
3308 
3309  return false;
3310 }
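// For illustration: "beq label" is first canonicalised to "b.eq label" and the
// mnemonic is split at '.' into a "b" token plus a condition-code operand.
// "csel x0, x1, x2, ne" makes parseOperand treat its fourth operand as a
// condition code (condCodeFourthOperand above), while "cinc x0, x1, ne" treats
// its third operand as a condition code and inverts it, since cinc aliases a
// csinc with the opposite condition.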
3311 
3312 // FIXME: This entire function is a giant hack to provide us with decent
3313 // operand range validation/diagnostics until TableGen/MC can be extended
3314 // to support autogeneration of this kind of validation.
3315 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3316  SmallVectorImpl<SMLoc> &Loc) {
3317  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3318  // Check for indexed addressing modes w/ the base register being the
3319  // same as a destination/source register or pair load where
3320  // the Rt == Rt2. All of those are undefined behaviour.
3321  switch (Inst.getOpcode()) {
3322  case AArch64::LDPSWpre:
3323  case AArch64::LDPWpost:
3324  case AArch64::LDPWpre:
3325  case AArch64::LDPXpost:
3326  case AArch64::LDPXpre: {
3327  unsigned Rt = Inst.getOperand(1).getReg();
3328  unsigned Rt2 = Inst.getOperand(2).getReg();
3329  unsigned Rn = Inst.getOperand(3).getReg();
3330  if (RI->isSubRegisterEq(Rn, Rt))
3331  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3332  "is also a destination");
3333  if (RI->isSubRegisterEq(Rn, Rt2))
3334  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3335  "is also a destination");
3336  LLVM_FALLTHROUGH;
3337  }
3338  case AArch64::LDPDi:
3339  case AArch64::LDPQi:
3340  case AArch64::LDPSi:
3341  case AArch64::LDPSWi:
3342  case AArch64::LDPWi:
3343  case AArch64::LDPXi: {
3344  unsigned Rt = Inst.getOperand(0).getReg();
3345  unsigned Rt2 = Inst.getOperand(1).getReg();
3346  if (Rt == Rt2)
3347  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3348  break;
3349  }
3350  case AArch64::LDPDpost:
3351  case AArch64::LDPDpre:
3352  case AArch64::LDPQpost:
3353  case AArch64::LDPQpre:
3354  case AArch64::LDPSpost:
3355  case AArch64::LDPSpre:
3356  case AArch64::LDPSWpost: {
3357  unsigned Rt = Inst.getOperand(1).getReg();
3358  unsigned Rt2 = Inst.getOperand(2).getReg();
3359  if (Rt == Rt2)
3360  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3361  break;
3362  }
3363  case AArch64::STPDpost:
3364  case AArch64::STPDpre:
3365  case AArch64::STPQpost:
3366  case AArch64::STPQpre:
3367  case AArch64::STPSpost:
3368  case AArch64::STPSpre:
3369  case AArch64::STPWpost:
3370  case AArch64::STPWpre:
3371  case AArch64::STPXpost:
3372  case AArch64::STPXpre: {
3373  unsigned Rt = Inst.getOperand(1).getReg();
3374  unsigned Rt2 = Inst.getOperand(2).getReg();
3375  unsigned Rn = Inst.getOperand(3).getReg();
3376  if (RI->isSubRegisterEq(Rn, Rt))
3377  return Error(Loc[0], "unpredictable STP instruction, writeback base "
3378  "is also a source");
3379  if (RI->isSubRegisterEq(Rn, Rt2))
3380  return Error(Loc[1], "unpredictable STP instruction, writeback base "
3381  "is also a source");
3382  break;
3383  }
3384  case AArch64::LDRBBpre:
3385  case AArch64::LDRBpre:
3386  case AArch64::LDRHHpre:
3387  case AArch64::LDRHpre:
3388  case AArch64::LDRSBWpre:
3389  case AArch64::LDRSBXpre:
3390  case AArch64::LDRSHWpre:
3391  case AArch64::LDRSHXpre:
3392  case AArch64::LDRSWpre:
3393  case AArch64::LDRWpre:
3394  case AArch64::LDRXpre:
3395  case AArch64::LDRBBpost:
3396  case AArch64::LDRBpost:
3397  case AArch64::LDRHHpost:
3398  case AArch64::LDRHpost:
3399  case AArch64::LDRSBWpost:
3400  case AArch64::LDRSBXpost:
3401  case AArch64::LDRSHWpost:
3402  case AArch64::LDRSHXpost:
3403  case AArch64::LDRSWpost:
3404  case AArch64::LDRWpost:
3405  case AArch64::LDRXpost: {
3406  unsigned Rt = Inst.getOperand(1).getReg();
3407  unsigned Rn = Inst.getOperand(2).getReg();
3408  if (RI->isSubRegisterEq(Rn, Rt))
3409  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3410  "is also a source");
3411  break;
3412  }
3413  case AArch64::STRBBpost:
3414  case AArch64::STRBpost:
3415  case AArch64::STRHHpost:
3416  case AArch64::STRHpost:
3417  case AArch64::STRWpost:
3418  case AArch64::STRXpost:
3419  case AArch64::STRBBpre:
3420  case AArch64::STRBpre:
3421  case AArch64::STRHHpre:
3422  case AArch64::STRHpre:
3423  case AArch64::STRWpre:
3424  case AArch64::STRXpre: {
3425  unsigned Rt = Inst.getOperand(1).getReg();
3426  unsigned Rn = Inst.getOperand(2).getReg();
3427  if (RI->isSubRegisterEq(Rn, Rt))
3428  return Error(Loc[0], "unpredictable STR instruction, writeback base "
3429  "is also a source");
3430  break;
3431  }
3432  }
3433 
3434  // Now check immediate ranges. Separate from the above as there is overlap
3435  // in the instructions being checked and this keeps the nested conditionals
3436  // to a minimum.
3437  switch (Inst.getOpcode()) {
3438  case AArch64::ADDSWri:
3439  case AArch64::ADDSXri:
3440  case AArch64::ADDWri:
3441  case AArch64::ADDXri:
3442  case AArch64::SUBSWri:
3443  case AArch64::SUBSXri:
3444  case AArch64::SUBWri:
3445  case AArch64::SUBXri: {
3446  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3447  // some slight duplication here.
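  // For example (illustrative): "add x0, x1, :lo12:sym" is accepted below,
  // while a bare symbol such as "add x0, x1, sym" falls through to the
  // "invalid immediate expression" diagnostic.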
3448  if (Inst.getOperand(2).isExpr()) {
3449  const MCExpr *Expr = Inst.getOperand(2).getExpr();
3450  AArch64MCExpr::VariantKind ELFRefKind;
3451  MCSymbolRefExpr::VariantKind DarwinRefKind;
3452  int64_t Addend;
3453  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3454 
3455  // Only allow these with ADDXri.
3456  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3457  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3458  Inst.getOpcode() == AArch64::ADDXri)
3459  return false;
3460 
3461  // Only allow these with ADDXri/ADDWri
3462  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3463  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3464  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3465  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3466  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3467  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3468  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3469  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3470  (Inst.getOpcode() == AArch64::ADDXri ||
3471  Inst.getOpcode() == AArch64::ADDWri))
3472  return false;
3473 
3474  // Don't allow symbol refs in the immediate field otherwise
3475  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3476  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3477  // 'cmp w0, 'borked')
3478  return Error(Loc.back(), "invalid immediate expression");
3479  }
3480  // We don't validate more complex expressions here
3481  }
3482  return false;
3483  }
3484  default:
3485  return false;
3486  }
3487 }
3488 
3489 static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
3490  unsigned VariantID = 0);
3491 
3492 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
3493  OperandVector &Operands) {
3494  switch (ErrCode) {
3495  case Match_InvalidTiedOperand:
3496  return Error(Loc, "operand must match destination register");
3497  case Match_MissingFeature:
3498  return Error(Loc,
3499  "instruction requires a CPU feature not currently enabled");
3500  case Match_InvalidOperand:
3501  return Error(Loc, "invalid operand for instruction");
3502  case Match_InvalidSuffix:
3503  return Error(Loc, "invalid type suffix for instruction");
3504  case Match_InvalidCondCode:
3505  return Error(Loc, "expected AArch64 condition code");
3506  case Match_AddSubRegExtendSmall:
3507  return Error(Loc,
3508  "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3509  case Match_AddSubRegExtendLarge:
3510  return Error(Loc,
3511  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3512  case Match_AddSubSecondSource:
3513  return Error(Loc,
3514  "expected compatible register, symbol or integer in range [0, 4095]");
3515  case Match_LogicalSecondSource:
3516  return Error(Loc, "expected compatible register or logical immediate");
3517  case Match_InvalidMovImm32Shift:
3518  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3519  case Match_InvalidMovImm64Shift:
3520  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3521  case Match_AddSubRegShift32:
3522  return Error(Loc,
3523  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3524  case Match_AddSubRegShift64:
3525  return Error(Loc,
3526  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3527  case Match_InvalidFPImm:
3528  return Error(Loc,
3529  "expected compatible register or floating-point constant");
3530  case Match_InvalidMemoryIndexedSImm6:
3531  return Error(Loc, "index must be an integer in range [-32, 31].");
3532  case Match_InvalidMemoryIndexedSImm9:
3533  return Error(Loc, "index must be an integer in range [-256, 255].");
3534  case Match_InvalidMemoryIndexedSImm10:
3535  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
3536  case Match_InvalidMemoryIndexed4SImm7:
3537  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3538  case Match_InvalidMemoryIndexed8SImm7:
3539  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3540  case Match_InvalidMemoryIndexed16SImm7:
3541  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3542  case Match_InvalidMemoryWExtend8:
3543  return Error(Loc,
3544  "expected 'uxtw' or 'sxtw' with optional shift of #0");
3545  case Match_InvalidMemoryWExtend16:
3546  return Error(Loc,
3547  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3548  case Match_InvalidMemoryWExtend32:
3549  return Error(Loc,
3550  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3551  case Match_InvalidMemoryWExtend64:
3552  return Error(Loc,
3553  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3554  case Match_InvalidMemoryWExtend128:
3555  return Error(Loc,
3556  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3557  case Match_InvalidMemoryXExtend8:
3558  return Error(Loc,
3559  "expected 'lsl' or 'sxtx' with optional shift of #0");
3560  case Match_InvalidMemoryXExtend16:
3561  return Error(Loc,
3562  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3563  case Match_InvalidMemoryXExtend32:
3564  return Error(Loc,
3565  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3566  case Match_InvalidMemoryXExtend64:
3567  return Error(Loc,
3568  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3569  case Match_InvalidMemoryXExtend128:
3570  return Error(Loc,
3571  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3572  case Match_InvalidMemoryIndexed1:
3573  return Error(Loc, "index must be an integer in range [0, 4095].");
3574  case Match_InvalidMemoryIndexed2:
3575  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3576  case Match_InvalidMemoryIndexed4:
3577  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3578  case Match_InvalidMemoryIndexed8:
3579  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3580  case Match_InvalidMemoryIndexed16:
3581  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3582  case Match_InvalidImm0_1:
3583  return Error(Loc, "immediate must be an integer in range [0, 1].");
3584  case Match_InvalidImm0_7:
3585  return Error(Loc, "immediate must be an integer in range [0, 7].");
3586  case Match_InvalidImm0_15:
3587  return Error(Loc, "immediate must be an integer in range [0, 15].");
3588  case Match_InvalidImm0_31:
3589  return Error(Loc, "immediate must be an integer in range [0, 31].");
3590  case Match_InvalidImm0_63:
3591  return Error(Loc, "immediate must be an integer in range [0, 63].");
3592  case Match_InvalidImm0_127:
3593  return Error(Loc, "immediate must be an integer in range [0, 127].");
3594  case Match_InvalidImm0_255:
3595  return Error(Loc, "immediate must be an integer in range [0, 255].");
3596  case Match_InvalidImm0_65535:
3597  return Error(Loc, "immediate must be an integer in range [0, 65535].");
3598  case Match_InvalidImm1_8:
3599  return Error(Loc, "immediate must be an integer in range [1, 8].");
3600  case Match_InvalidImm1_16:
3601  return Error(Loc, "immediate must be an integer in range [1, 16].");
3602  case Match_InvalidImm1_32:
3603  return Error(Loc, "immediate must be an integer in range [1, 32].");
3604  case Match_InvalidImm1_64:
3605  return Error(Loc, "immediate must be an integer in range [1, 64].");
3606  case Match_InvalidIndex1:
3607  return Error(Loc, "expected lane specifier '[1]'");
3608  case Match_InvalidIndexB:
3609  return Error(Loc, "vector lane must be an integer in range [0, 15].");
3610  case Match_InvalidIndexH:
3611  return Error(Loc, "vector lane must be an integer in range [0, 7].");
3612  case Match_InvalidIndexS:
3613  return Error(Loc, "vector lane must be an integer in range [0, 3].");
3614  case Match_InvalidIndexD:
3615  return Error(Loc, "vector lane must be an integer in range [0, 1].");
3616  case Match_InvalidLabel:
3617  return Error(Loc, "expected label or encodable integer pc offset");
3618  case Match_MRS:
3619  return Error(Loc, "expected readable system register");
3620  case Match_MSR:
3621  return Error(Loc, "expected writable system register or pstate");
3622  case Match_InvalidComplexRotationEven:
3623  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
3624  case Match_InvalidComplexRotationOdd:
3625  return Error(Loc, "complex rotation must be 90 or 270.");
3626  case Match_MnemonicFail: {
3627  std::string Suggestion = AArch64MnemonicSpellCheck(
3628  ((AArch64Operand &)*Operands[0]).getToken(),
3629  ComputeAvailableFeatures(STI->getFeatureBits()));
3630  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
3631  }
3632  case Match_InvalidSVEPattern:
3633  return Error(Loc, "invalid predicate pattern");
3634  case Match_InvalidSVEPredicateAnyReg:
3635  case Match_InvalidSVEPredicateBReg:
3636  case Match_InvalidSVEPredicateHReg:
3637  case Match_InvalidSVEPredicateSReg:
3638  case Match_InvalidSVEPredicateDReg:
3639  return Error(Loc, "invalid predicate register.");
3640  case Match_InvalidSVEPredicate3bAnyReg:
3641  case Match_InvalidSVEPredicate3bBReg:
3642  case Match_InvalidSVEPredicate3bHReg:
3643  case Match_InvalidSVEPredicate3bSReg:
3644  case Match_InvalidSVEPredicate3bDReg:
3645  return Error(Loc, "restricted predicate has range [0, 7].");
3646  default:
3647  llvm_unreachable("unexpected error code!");
3648  }
3649 }
3650 
3651 static const char *getSubtargetFeatureName(uint64_t Val);
3652 
3653 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3654  OperandVector &Operands,
3655  MCStreamer &Out,
3656  uint64_t &ErrorInfo,
3657  bool MatchingInlineAsm) {
3658  assert(!Operands.empty() && "Unexpected empty operand list!");
3659  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3660  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3661 
3662  StringRef Tok = Op.getToken();
3663  unsigned NumOperands = Operands.size();
3664 
3665  if (NumOperands == 4 && Tok == "lsl") {
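    // LSL-immediate is an alias of UBFM. Sketch of the rewrite performed below
    // (illustrative values): "lsl w0, w1, #4" becomes "ubfm w0, w1, #28, #27",
    // i.e. immr = (32 - shift) % 32 and imms = 31 - shift.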
3666  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3667  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3668  if (Op2.isScalarReg() && Op3.isImm()) {
3669  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3670  if (Op3CE) {
3671  uint64_t Op3Val = Op3CE->getValue();
3672  uint64_t NewOp3Val = 0;
3673  uint64_t NewOp4Val = 0;
3674  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3675  Op2.getReg())) {
3676  NewOp3Val = (32 - Op3Val) & 0x1f;
3677  NewOp4Val = 31 - Op3Val;
3678  } else {
3679  NewOp3Val = (64 - Op3Val) & 0x3f;
3680  NewOp4Val = 63 - Op3Val;
3681  }
3682 
3683  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3684  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3685 
3686  Operands[0] = AArch64Operand::CreateToken(
3687  "ubfm", false, Op.getStartLoc(), getContext());
3688  Operands.push_back(AArch64Operand::CreateImm(
3689  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3690  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3691  Op3.getEndLoc(), getContext());
3692  }
3693  }
3694  } else if (NumOperands == 4 && Tok == "bfc") {
3695  // FIXME: Horrible hack to handle BFC->BFM alias.
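    // Sketch of the rewrite (illustrative values): "bfc w0, #3, #4" becomes
    // "bfm w0, wzr, #29, #3", i.e. immr = (regwidth - lsb) % regwidth and
    // imms = width - 1, with WZR/XZR substituted as the source register.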
3696  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3697  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3698  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3699 
3700  if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
3701  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3702  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3703 
3704  if (LSBCE && WidthCE) {
3705  uint64_t LSB = LSBCE->getValue();
3706  uint64_t Width = WidthCE->getValue();
3707 
3708  uint64_t RegWidth = 0;
3709  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3710  Op1.getReg()))
3711  RegWidth = 64;
3712  else
3713  RegWidth = 32;
3714 
3715  if (LSB >= RegWidth)
3716  return Error(LSBOp.getStartLoc(),
3717  "expected integer in range [0, 31]");
3718  if (Width < 1 || Width > RegWidth)
3719  return Error(WidthOp.getStartLoc(),
3720  "expected integer in range [1, 32]");
3721 
3722  uint64_t ImmR = 0;
3723  if (RegWidth == 32)
3724  ImmR = (32 - LSB) & 0x1f;
3725  else
3726  ImmR = (64 - LSB) & 0x3f;
3727 
3728  uint64_t ImmS = Width - 1;
3729 
3730  if (ImmR != 0 && ImmS >= ImmR)
3731  return Error(WidthOp.getStartLoc(),
3732  "requested insert overflows register");
3733 
3734  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3735  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3736  Operands[0] = AArch64Operand::CreateToken(
3737  "bfm", false, Op.getStartLoc(), getContext());
3738  Operands[2] = AArch64Operand::CreateReg(
3739  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
3740  SMLoc(), SMLoc(), getContext());
3741  Operands[3] = AArch64Operand::CreateImm(
3742  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3743  Operands.emplace_back(
3744  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3745  WidthOp.getEndLoc(), getContext()));
3746  }
3747  }
3748  } else if (NumOperands == 5) {
3749  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3750  // UBFIZ -> UBFM aliases.
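    // Sketch of the rewrite (illustrative values): "sbfiz x0, x1, #8, #16"
    // becomes "sbfm x0, x1, #56, #15", i.e. immr = (regwidth - lsb) % regwidth
    // and imms = width - 1.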
3751  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3752  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3753  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3754  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3755 
3756  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
3757  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3758  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3759 
3760  if (Op3CE && Op4CE) {
3761  uint64_t Op3Val = Op3CE->getValue();
3762  uint64_t Op4Val = Op4CE->getValue();
3763 
3764  uint64_t RegWidth = 0;
3765  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3766  Op1.getReg()))
3767  RegWidth = 64;
3768  else
3769  RegWidth = 32;
3770 
3771  if (Op3Val >= RegWidth)
3772  return Error(Op3.getStartLoc(),
3773  "expected integer in range [0, 31]");
3774  if (Op4Val < 1 || Op4Val > RegWidth)
3775  return Error(Op4.getStartLoc(),
3776  "expected integer in range [1, 32]");
3777 
3778  uint64_t NewOp3Val = 0;
3779  if (RegWidth == 32)
3780  NewOp3Val = (32 - Op3Val) & 0x1f;
3781  else
3782  NewOp3Val = (64 - Op3Val) & 0x3f;
3783 
3784  uint64_t NewOp4Val = Op4Val - 1;
3785 
3786  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3787  return Error(Op4.getStartLoc(),
3788  "requested insert overflows register");
3789 
3790  const MCExpr *NewOp3 =
3791  MCConstantExpr::create(NewOp3Val, getContext());
3792  const MCExpr *NewOp4 =
3793  MCConstantExpr::create(NewOp4Val, getContext());
3794  Operands[3] = AArch64Operand::CreateImm(
3795  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3796  Operands[4] = AArch64Operand::CreateImm(
3797  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3798  if (Tok == "bfi")
3799  Operands[0] = AArch64Operand::CreateToken(
3800  "bfm", false, Op.getStartLoc(), getContext());
3801  else if (Tok == "sbfiz")
3802  Operands[0] = AArch64Operand::CreateToken(
3803  "sbfm", false, Op.getStartLoc(), getContext());
3804  else if (Tok == "ubfiz")
3805  Operands[0] = AArch64Operand::CreateToken(
3806  "ubfm", false, Op.getStartLoc(), getContext());
3807  else
3808  llvm_unreachable("No valid mnemonic for alias?");
3809  }
3810  }
3811 
3812  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3813  // UBFX -> UBFM aliases.
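  // Sketch of the rewrite (illustrative values): "ubfx x0, x1, #16, #8"
  // becomes "ubfm x0, x1, #16, #23", i.e. imms = lsb + width - 1 while the
  // lsb operand is kept as immr.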
3814  } else if (NumOperands == 5 &&
3815  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3816  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3817  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3818  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3819 
3820  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
3821  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3822  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3823 
3824  if (Op3CE && Op4CE) {
3825  uint64_t Op3Val = Op3CE->getValue();
3826  uint64_t Op4Val = Op4CE->getValue();
3827 
3828  uint64_t RegWidth = 0;
3829  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3830  Op1.getReg()))
3831  RegWidth = 64;
3832  else
3833  RegWidth = 32;
3834 
3835  if (Op3Val >= RegWidth)
3836  return Error(Op3.getStartLoc(),
3837  "expected integer in range [0, 31]");
3838  if (Op4Val < 1 || Op4Val > RegWidth)
3839  return Error(Op4.getStartLoc(),
3840  "expected integer in range [1, 32]");
3841 
3842  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3843 
3844  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3845  return Error(Op4.getStartLoc(),
3846  "requested extract overflows register");
3847 
3848  const MCExpr *NewOp4 =
3849  MCConstantExpr::create(NewOp4Val, getContext());
3850  Operands[4] = AArch64Operand::CreateImm(
3851  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3852  if (Tok == "bfxil")
3853  Operands[0] = AArch64Operand::CreateToken(
3854  "bfm", false, Op.getStartLoc(), getContext());
3855  else if (Tok == "sbfx")
3856  Operands[0] = AArch64Operand::CreateToken(
3857  "sbfm", false, Op.getStartLoc(), getContext());
3858  else if (Tok == "ubfx")
3859  Operands[0] = AArch64Operand::CreateToken(
3860  "ubfm", false, Op.getStartLoc(), getContext());
3861  else
3862  llvm_unreachable("No valid mnemonic for alias?");
3863  }
3864  }
3865  }
3866  }
3867 
3868  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
3869  // instruction for FP registers correctly in some rare circumstances. Convert
3870  // it to a safe instruction and warn (because silently changing someone's
3871  // assembly is rude).
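  // For example (illustrative): "movi v0.2d, #0" is rewritten below to
  // "movi v0.16b, #0", which produces the same all-zero 128-bit result with an
  // encoding that is not affected by the erratum.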
3872  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
3873  NumOperands == 4 && Tok == "movi") {
3874  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3875  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3876  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3877  if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
3878  (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
3879  StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
3880  if (Suffix.lower() == ".2d" &&
3881  cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
3882  Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
3883  " correctly on this CPU, converting to equivalent movi.16b");
3884  // Switch the suffix to .16b.
3885  unsigned Idx = Op1.isToken() ? 1 : 2;
3886  Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
3887  getContext());
3888  }
3889  }
3890  }
3891 
3892  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3893  // InstAlias can't quite handle this since the reg classes aren't
3894  // subclasses.
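  // For example (illustrative): in "sxtw x0, w1" the source register is
  // rewritten from w1 to x1 below so it matches the GPR64 operand the matcher
  // expects; the register number encoded is unchanged.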
3895  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3896  // The source register can be Wn here, but the matcher expects a
3897  // GPR64. Twiddle it here if necessary.
3898  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3899  if (Op.isScalarReg()) {
3900  unsigned Reg = getXRegFromWReg(Op.getReg());
3901  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3902  Op.getStartLoc(), Op.getEndLoc(),
3903  getContext());
3904  }
3905  }
3906  // FIXME: Likewise for sxt[bh] with a Xd dst operand
3907  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3908  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3909  if (Op.isScalarReg() &&
3910  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3911  Op.getReg())) {
3912  // The source register can be Wn here, but the matcher expects a
3913  // GPR64. Twiddle it here if necessary.
3914  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3915  if (Op.isScalarReg()) {
3916  unsigned Reg = getXRegFromWReg(Op.getReg());
3917  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3918  Op.getStartLoc(),
3919  Op.getEndLoc(), getContext());
3920  }
3921  }
3922  }
3923  // FIXME: Likewise for uxt[bh] with a Xd dst operand
3924  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3925  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3926  if (Op.isScalarReg() &&
3927  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3928  Op.getReg())) {
3929  // The source register can be Wn here, but the matcher expects a
3930  // GPR32. Twiddle it here if necessary.
3931  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3932  if (Op.isScalarReg()) {
3933  unsigned Reg = getWRegFromXReg(Op.getReg());
3934  Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3935  Op.getStartLoc(),
3936  Op.getEndLoc(), getContext());
3937  }
3938  }
3939  }
3940 
3941  MCInst Inst;
3942  // First try to match against the secondary set of tables containing the
3943  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3944  unsigned MatchResult =
3945  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3946 
3947  // If that fails, try against the alternate table containing long-form NEON:
3948  // "fadd v0.2s, v1.2s, v2.2s"
3949  if (MatchResult != Match_Success) {
3950  // But first, save the short-form match result: we can use it in case the
3951  // long-form match also fails.
3952  auto ShortFormNEONErrorInfo = ErrorInfo;
3953  auto ShortFormNEONMatchResult = MatchResult;
3954 
3955  MatchResult =
3956  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3957 
3958  // Now, both matches failed, and the long-form match failed on the mnemonic
3959  // suffix token operand. The short-form match failure is probably more
3960  // relevant: use it instead.
3961  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3962  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3963  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3964  MatchResult = ShortFormNEONMatchResult;
3965  ErrorInfo = ShortFormNEONErrorInfo;
3966  }
3967  }
3968 
3969  switch (MatchResult) {
3970  case Match_Success: {
3971  // Perform range checking and other semantic validations
3972  SmallVector<SMLoc, 8> OperandLocs;
3973  NumOperands = Operands.size();
3974  for (unsigned i = 1; i < NumOperands; ++i)
3975  OperandLocs.push_back(Operands[i]->getStartLoc());
3976  if (validateInstruction(Inst, OperandLocs))
3977  return true;
3978 
3979  Inst.setLoc(IDLoc);
3980  Out.EmitInstruction(Inst, getSTI());
3981  return false;
3982  }
3983  case Match_MissingFeature: {
3984  assert(ErrorInfo && "Unknown missing feature!");
3985  // Special case the error message for the very common case where only
3986  // a single subtarget feature is missing (e.g. NEON).
3987  std::string Msg = "instruction requires:";
3988  uint64_t Mask = 1;
3989  for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3990  if (ErrorInfo & Mask) {
3991  Msg += " ";
3992  Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3993  }
3994  Mask <<= 1;
3995  }
3996  return Error(IDLoc, Msg);
3997  }
3998  case Match_MnemonicFail:
3999  return showMatchError(IDLoc, MatchResult, Operands);
4000  case Match_InvalidOperand: {
4001  SMLoc ErrorLoc = IDLoc;
4002 
4003  if (ErrorInfo != ~0ULL) {
4004  if (ErrorInfo >= Operands.size())
4005  return Error(IDLoc, "too few operands for instruction",
4006  SMRange(IDLoc, getTok().getLoc()));
4007 
4008  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4009  if (ErrorLoc == SMLoc())
4010  ErrorLoc = IDLoc;
4011  }
4012  // If the match failed on a suffix token operand, tweak the diagnostic
4013  // accordingly.
4014  if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4015  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4016  MatchResult = Match_InvalidSuffix;
4017 
4018  return showMatchError(ErrorLoc, MatchResult, Operands);
4019  }
4020  case Match_InvalidTiedOperand:
4021  case Match_InvalidMemoryIndexed1:
4022  case Match_InvalidMemoryIndexed2:
4023  case Match_InvalidMemoryIndexed4:
4024  case Match_InvalidMemoryIndexed8:
4025  case Match_InvalidMemoryIndexed16:
4026  case Match_InvalidCondCode:
4027  case Match_AddSubRegExtendSmall:
4028  case Match_AddSubRegExtendLarge:
4029  case Match_AddSubSecondSource:
4030  case Match_LogicalSecondSource:
4031  case Match_AddSubRegShift32:
4032  case Match_AddSubRegShift64:
4033  case Match_InvalidMovImm32Shift:
4034  case Match_InvalidMovImm64Shift:
4035  case Match_InvalidFPImm:
4036  case Match_InvalidMemoryWExtend8:
4037  case Match_InvalidMemoryWExtend16:
4038  case Match_InvalidMemoryWExtend32:
4039  case Match_InvalidMemoryWExtend64:
4040  case Match_InvalidMemoryWExtend128:
4041  case Match_InvalidMemoryXExtend8:
4042  case Match_InvalidMemoryXExtend16:
4043  case Match_InvalidMemoryXExtend32:
4044  case Match_InvalidMemoryXExtend64:
4045  case Match_InvalidMemoryXExtend128:
4046  case Match_InvalidMemoryIndexedSImm6:
4047  case Match_InvalidMemoryIndexed4SImm7:
4048  case Match_InvalidMemoryIndexed8SImm7:
4049  case Match_InvalidMemoryIndexed16SImm7:
4050  case Match_InvalidMemoryIndexedSImm9:
4051  case Match_InvalidMemoryIndexedSImm10:
4052  case Match_InvalidImm0_1:
4053  case Match_InvalidImm0_7:
4054  case Match_InvalidImm0_15:
4055  case Match_InvalidImm0_31:
4056  case Match_InvalidImm0_63:
4057  case Match_InvalidImm0_127:
4058  case Match_InvalidImm0_255:
4059  case Match_InvalidImm0_65535:
4060  case Match_InvalidImm1_8:
4061  case Match_InvalidImm1_16:
4062  case Match_InvalidImm1_32:
4063  case Match_InvalidImm1_64:
4064  case Match_InvalidIndex1:
4065  case Match_InvalidIndexB:
4066  case Match_InvalidIndexH:
4067  case Match_InvalidIndexS:
4068  case Match_InvalidIndexD:
4069  case Match_InvalidLabel:
4070  case Match_InvalidComplexRotationEven:
4071  case Match_InvalidComplexRotationOdd:
4072  case Match_InvalidSVEPredicateAnyReg:
4073  case Match_InvalidSVEPattern:
4074  case Match_InvalidSVEPredicateBReg:
4075  case Match_InvalidSVEPredicateHReg:
4076  case Match_InvalidSVEPredicateSReg:
4077  case Match_InvalidSVEPredicateDReg:
4078  case Match_InvalidSVEPredicate3bAnyReg:
4079  case Match_InvalidSVEPredicate3bBReg:
4080  case Match_InvalidSVEPredicate3bHReg:
4081  case Match_InvalidSVEPredicate3bSReg:
4082  case Match_InvalidSVEPredicate3bDReg:
4083  case Match_MSR:
4084  case Match_MRS: {
4085  if (ErrorInfo >= Operands.size())
4086  return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4087  // Any time we get here, there's nothing fancy to do. Just get the
4088  // operand SMLoc and display the diagnostic.
4089  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4090  if (ErrorLoc == SMLoc())
4091  ErrorLoc = IDLoc;
4092  return showMatchError(ErrorLoc, MatchResult, Operands);
4093  }
4094  }
4095 
4096  llvm_unreachable("Implement any new match types added!");
4097 }
4098 
4099 /// ParseDirective parses the AArch64-specific directives
4100 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4101  const MCObjectFileInfo::Environment Format =
4102  getContext().getObjectFileInfo()->getObjectFileType();
4103  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4104  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4105 
4106  StringRef IDVal = DirectiveID.getIdentifier();
4107  SMLoc Loc = DirectiveID.getLoc();
4108  if (IDVal == ".arch")
4109  parseDirectiveArch(Loc);
4110  else if (IDVal == ".cpu")
4111  parseDirectiveCPU(Loc);
4112  else if (IDVal == ".hword")
4113  parseDirectiveWord(2, Loc);
4114  else if (IDVal == ".word")
4115  parseDirectiveWord(4, Loc);
4116  else if (IDVal == ".xword")
4117  parseDirectiveWord(8, Loc);
4118  else if (IDVal == ".tlsdesccall")
4119  parseDirectiveTLSDescCall(Loc);
4120  else if (IDVal == ".ltorg" || IDVal == ".pool")
4121  parseDirectiveLtorg(Loc);
4122  else if (IDVal == ".unreq")
4123  parseDirectiveUnreq(Loc);
4124  else if (!IsMachO && !IsCOFF) {
4125  if (IDVal == ".inst")
4126  parseDirectiveInst(Loc);
4127  else
4128  return true;
4129  } else if (IDVal == MCLOHDirectiveName())
4130  parseDirectiveLOH(IDVal, Loc);
4131  else
4132  return true;
4133  return false;
4134 }
4135 
4136 static const struct {
4137  const char *Name;
4138  const FeatureBitset Features;
4139 } ExtensionMap[] = {
4140  { "crc", {AArch64::FeatureCRC} },
4141  { "crypto", {AArch64::FeatureCrypto} },
4142  { "fp", {AArch64::FeatureFPARMv8} },
4143  { "simd", {AArch64::FeatureNEON} },
4144  { "ras", {AArch64::FeatureRAS} },
4145  { "lse", {AArch64::FeatureLSE} },
4146 
4147  // FIXME: Unsupported extensions
4148  { "pan", {} },
4149  { "lor", {} },
4150  { "rdma", {} },
4151  { "profile", {} },
4152 };
4153 
4154 /// parseDirectiveArch
4155 /// ::= .arch token
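/// e.g. (illustrative) ".arch armv8-a+crc" selects the ARMv8-A feature set and
/// enables the CRC extension; a "no" prefix such as "+nocrc" disables one.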
4156 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4157  SMLoc ArchLoc = getLoc();
4158 
4159  StringRef Arch, ExtensionString;
4160  std::tie(Arch, ExtensionString) =
4161  getParser().parseStringToEndOfStatement().trim().split('+');
4162 
4163  AArch64::ArchKind ID = AArch64::parseArch(Arch);
4164  if (ID == AArch64::ArchKind::INVALID)
4165  return Error(ArchLoc, "unknown arch name");
4166 
4167  if (parseToken(AsmToken::EndOfStatement))
4168  return true;
4169 
4170  // Get the architecture and extension features.
4171  std::vector<StringRef> AArch64Features;
4172  AArch64::getArchFeatures(ID, AArch64Features);
4173  AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
4174  AArch64Features);
4175 
4176  MCSubtargetInfo &STI = copySTI();
4177  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4178  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4179 
4180  SmallVector<StringRef, 4> RequestedExtensions;
4181  if (!ExtensionString.empty())
4182  ExtensionString.split(RequestedExtensions, '+');
4183 
4184  FeatureBitset Features = STI.getFeatureBits();
4185  for (auto Name : RequestedExtensions) {
4186  bool EnableFeature = true;
4187 
4188  if (Name.startswith_lower("no")) {
4189  EnableFeature = false;
4190  Name = Name.substr(2);
4191  }
4192 
4193  for (const auto &Extension : ExtensionMap) {
4194  if (Extension.Name != Name)
4195  continue;
4196 
4197  if (Extension.Features.none())
4198  report_fatal_error("unsupported architectural extension: " + Name);
4199 
4200  FeatureBitset ToggleFeatures = EnableFeature
4201  ? (~Features & Extension.Features)
4202  : ( Features & Extension.Features);
4203  uint64_t Features =
4204  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4205  setAvailableFeatures(Features);
4206  break;
4207  }
4208  }
4209  return false;
4210 }
4211 
4212 static SMLoc incrementLoc(SMLoc L, int Offset) {
4213  return SMLoc::getFromPointer(L.getPointer() + Offset);
4214 }
4215 
4216 /// parseDirectiveCPU
4217 /// ::= .cpu id
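/// e.g. (illustrative) ".cpu cortex-a53+nocrc" selects the cortex-a53 feature
/// set and then disables the CRC extension.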
4218 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4219  SMLoc CurLoc = getLoc();
4220 
4221  StringRef CPU, ExtensionString;
4222  std::tie(CPU, ExtensionString) =
4223  getParser().parseStringToEndOfStatement().trim().split('+');
4224 
4225  if (parseToken(AsmToken::EndOfStatement))
4226  return true;
4227 
4228  SmallVector<StringRef, 4> RequestedExtensions;
4229  if (!ExtensionString.empty())
4230  ExtensionString.split(RequestedExtensions, '+');
4231 
4232  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4233  // once that is tablegen'ed
4234  if (!getSTI().isCPUStringValid(CPU)) {
4235  Error(CurLoc, "unknown CPU name");
4236  return false;
4237  }
4238 
4239  MCSubtargetInfo &STI = copySTI();
4240  STI.setDefaultFeatures(CPU, "");
4241  CurLoc = incrementLoc(CurLoc, CPU.size());
4242 
4243  FeatureBitset Features = STI.getFeatureBits();
4244  for (auto Name : RequestedExtensions) {
4245  // Advance source location past '+'.
4246  CurLoc = incrementLoc(CurLoc, 1);
4247 
4248  bool EnableFeature = true;
4249 
4250  if (Name.startswith_lower("no")) {
4251  EnableFeature = false;
4252  Name = Name.substr(2);
4253  }
4254 
4255  bool FoundExtension = false;
4256  for (const auto &Extension : ExtensionMap) {
4257  if (Extension.Name != Name)
4258  continue;
4259 
4260  if (Extension.Features.none())
4261  report_fatal_error("unsupported architectural extension: " + Name);
4262 
4263  FeatureBitset ToggleFeatures = EnableFeature
4264  ? (~Features & Extension.Features)
4265  : ( Features & Extension.Features);
4266  uint64_t Features =
4267  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4268  setAvailableFeatures(Features);
4269  FoundExtension = true;
4270 
4271  break;
4272  }
4273 
4274  if (!FoundExtension)
4275  Error(CurLoc, "unsupported architectural extension");
4276 
4277  CurLoc = incrementLoc(CurLoc, Name.size());
4278  }
4279  return false;
4280 }
4281 
4282 /// parseDirectiveWord
4283 /// ::= .word [ expression (, expression)* ]
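/// The size is chosen by the caller: e.g. (illustrative) ".hword 0x1234" emits
/// 2 bytes, ".word 0x12345678" emits 4, and ".xword 1" emits 8.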
4284 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4285  auto parseOp = [&]() -> bool {
4286  const MCExpr *Value;
4287  if (getParser().parseExpression(Value))
4288  return true;
4289  getParser().getStreamer().EmitValue(Value, Size, L);
4290  return false;
4291  };
4292 
4293  if (parseMany(parseOp))
4294  return true;
4295  return false;
4296 }
4297 
4298 /// parseDirectiveInst
4299 /// ::= .inst opcode [, ...]
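/// e.g. (illustrative) ".inst 0xd503201f" emits the 4-byte encoding of a NOP
/// directly into the instruction stream.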
4300 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4301  if (getLexer().is(AsmToken::EndOfStatement))
4302  return Error(Loc, "expected expression following '.inst' directive");
4303 
4304  auto parseOp = [&]() -> bool {
4305  SMLoc L = getLoc();
4306  const MCExpr *Expr;
4307  if (check(getParser().parseExpression(Expr), L, "expected expression"))
4308  return true;
4309  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4310  if (check(!Value, L, "expected constant expression"))
4311  return true;
4312  getTargetStreamer().emitInst(Value->getValue());
4313  return false;
4314  };
4315 
4316  if (parseMany(parseOp))
4317  return addErrorSuffix(" in '.inst' directive");
4318  return false;
4319 }
4320 
4321 // parseDirectiveTLSDescCall:
4322 // ::= .tlsdesccall symbol
4323 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4324  StringRef Name;
4325  if (check(getParser().parseIdentifier(Name), L,
4326  "expected symbol after directive") ||
4327  parseToken(AsmToken::EndOfStatement))
4328  return true;
4329 
4330  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4331  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4332  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4333 
4334  MCInst Inst;
4335  Inst.setOpcode(AArch64::TLSDESCCALL);
4336  Inst.addOperand(MCOperand::createExpr(Expr));
4337 
4338  getParser().getStreamer().EmitInstruction(Inst, getSTI());
4339  return false;
4340 }
4341 
4342 /// ::= .loh <lohName | lohId> label1, ..., labelN
4343 /// The number of arguments depends on the loh identifier.
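/// e.g. (illustrative) ".loh AdrpAdd Lpage, Loff" names two previously defined
/// labels for an ADRP/ADD linker optimization hint.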
4344 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4345  MCLOHType Kind;
4346  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4347  if (getParser().getTok().isNot(AsmToken::Integer))
4348  return TokError("expected an identifier or a number in directive");
4349  // We successfully get a numeric value for the identifier.
4350  // Check if it is valid.
4351  int64_t Id = getParser().getTok().getIntVal();
4352  if (Id <= -1U && !isValidMCLOHType(Id))
4353  return TokError("invalid numeric identifier in directive");
4354  Kind = (MCLOHType)Id;
4355  } else {
4356  StringRef Name = getTok().getIdentifier();
4357  // We successfully parse an identifier.
4358  // Check if it is a recognized one.
4359  int Id = MCLOHNameToId(Name);
4360 
4361  if (Id == -1)
4362  return TokError("invalid identifier in directive");
4363  Kind = (MCLOHType)Id;
4364  }
4365  // Consume the identifier.
4366  Lex();
4367  // Get the number of arguments of this LOH.
4368  int NbArgs = MCLOHIdToNbArgs(Kind);
4369 
4370  assert(NbArgs != -1 && "Invalid number of arguments");
4371 
4372  SmallVector<MCSymbol *, 3> Args;
4373  for (int Idx = 0; Idx < NbArgs; ++Idx) {
4374  StringRef Name;
4375  if (getParser().parseIdentifier(Name))
4376  return TokError("expected identifier in directive");
4377  Args.push_back(getContext().getOrCreateSymbol(Name));
4378 
4379  if (Idx + 1 == NbArgs)
4380  break;
4381  if (parseToken(AsmToken::Comma,
4382  "unexpected token in '" + Twine(IDVal) + "' directive"))
4383  return true;
4384  }
4385  if (parseToken(AsmToken::EndOfStatement,
4386  "unexpected token in '" + Twine(IDVal) + "' directive"))
4387  return true;
4388 
4389  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4390  return false;
4391 }
4392 
4393 /// parseDirectiveLtorg
4394 /// ::= .ltorg | .pool
4395 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4396  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4397  return true;
4398  getTargetStreamer().emitCurrentConstantPool();
4399  return false;
4400 }
4401 
4402 /// parseDirectiveReq
4403 /// ::= name .req registername
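/// e.g. (illustrative) "fp2 .req x29" lets later code refer to x29 as fp2;
/// ".unreq fp2" removes the alias again.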
4404 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4405  MCAsmParser &Parser = getParser();
4406  Parser.Lex(); // Eat the '.req' token.
4407  SMLoc SRegLoc = getLoc();
4408  int RegNum = tryParseRegister();
4409  RegKind RegisterKind = RegKind::Scalar;
4410 
4411  if (RegNum == -1) {
4412  StringRef Kind;
4413  RegisterKind = RegKind::NeonVector;
4414  RegNum = tryMatchVectorRegister(Kind, false);
4415  if (!Kind.empty())
4416  return Error(SRegLoc, "vector register without type specifier expected");
4417  }
4418 
4419  if (RegNum == -1) {
4420  StringRef Kind;
4421  RegisterKind = RegKind::SVEDataVector;
4422  OperandMatchResultTy Res =
4423  tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
4424 
4425  if (Res == MatchOperand_ParseFail)
4426  return true;
4427 
4428  if (Res == MatchOperand_Success && !Kind.empty())
4429  return Error(SRegLoc,
4430  "sve vector register without type specifier expected");
4431  }
4432 
4433  if (RegNum == -1) {
4434  StringRef Kind;
4435  RegisterKind = RegKind::SVEPredicateVector;
4436  OperandMatchResultTy Res =
4437  tryParseSVERegister(RegNum, Kind, RegKind::SVEPredicateVector);
4438 
4439  if (Res == MatchOperand_ParseFail)
4440  return true;
4441 
4442  if (Res == MatchOperand_Success && !Kind.empty())
4443  return Error(SRegLoc,
4444  "sve predicate register without type specifier expected");
4445  }
4446 
4447  if (RegNum == -1)
4448  return Error(SRegLoc, "register name or alias expected");
4449 
4450  // Shouldn't be anything else.
4451  if (parseToken(AsmToken::EndOfStatement,
4452  "unexpected input in .req directive"))
4453  return true;
4454 
4455  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
4456  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4457  Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4458 
4459  return false;
4460 }
4461 
4462 /// parseDirectiveUnreq
4463 /// ::= .unreq registername
4464 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4465  MCAsmParser &Parser = getParser();
4466  if (getTok().isNot(AsmToken::Identifier))
4467  return TokError("unexpected input in .unreq directive.");
4468  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4469  Parser.Lex(); // Eat the identifier.
4470  if (parseToken(AsmToken::EndOfStatement))
4471  return addErrorSuffix("in '.unreq' directive");
4472  return false;
4473 }
4474 
4475 bool
4476 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4477  AArch64MCExpr::VariantKind &ELFRefKind,
4478  MCSymbolRefExpr::VariantKind &DarwinRefKind,
4479  int64_t &Addend) {
4480  ELFRefKind = AArch64MCExpr::VK_INVALID;
4481  DarwinRefKind = MCSymbolRefExpr::VK_None;
4482  Addend = 0;
4483 
4484  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4485  ELFRefKind = AE->getKind();
4486  Expr = AE->getSubExpr();
4487  }
4488 
4489  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4490  if (SE) {
4491  // It's a simple symbol reference with no addend.
4492  DarwinRefKind = SE->getKind();
4493  return true;
4494  }
4495 
4496  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4497  if (!BE)
4498  return false;
4499 
4500  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4501  if (!SE)
4502  return false;
4503  DarwinRefKind = SE->getKind();
4504 
4505  if (BE->getOpcode() != MCBinaryExpr::Add &&
4506  BE->getOpcode() != MCBinaryExpr::Sub)
4507  return false;
4508 
4509  // See if the addend is a constant; otherwise there's more going
4510  // on here than we can deal with.
4511  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4512  if (!AddendExpr)
4513  return false;
4514 
4515  Addend = AddendExpr->getValue();
4516  if (BE->getOpcode() == MCBinaryExpr::Sub)
4517  Addend = -Addend;
4518 
4519  // It's some symbol reference + a constant addend, but really
4520  // shouldn't use both Darwin and ELF syntax.
4521  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4522  DarwinRefKind == MCSymbolRefExpr::VK_None;
4523 }
4524 
4525 /// Force static initialization.
4526 extern "C" void LLVMInitializeAArch64AsmParser() {
4527  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
4528  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
4529  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
4530 }
4531 
4532 #define GET_REGISTER_MATCHER
4533 #define GET_SUBTARGET_FEATURE_NAME
4534 #define GET_MATCHER_IMPLEMENTATION
4535 #define GET_MNEMONIC_SPELL_CHECKER
4536 #include "AArch64GenAsmMatcher.inc"
4537 
4538 // Define this matcher function after the auto-generated include so we
4539 // have the match class enum definitions.
4540 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4541  unsigned Kind) {
4542  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4543  // If the kind is a token for a literal immediate, check if our asm
4544  // operand matches. This is for InstAliases which have a fixed-value
4545  // immediate in the syntax.
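  // For example (illustrative): the match class MCK__35_8 corresponds to a
  // literal "#8" operand in an alias, so an immediate operand only matches it
  // when its constant value is exactly 8.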
4546  int64_t ExpectedVal;
4547  switch (Kind) {
4548  default:
4549  return Match_InvalidOperand;
4550  case MCK__35_0:
4551  ExpectedVal = 0;
4552  break;
4553  case MCK__35_1:
4554  ExpectedVal = 1;
4555  break;
4556  case MCK__35_12:
4557  ExpectedVal = 12;
4558  break;
4559  case MCK__35_16:
4560  ExpectedVal = 16;
4561  break;
4562  case MCK__35_2:
4563  ExpectedVal = 2;
4564  break;
4565  case MCK__35_24:
4566  ExpectedVal = 24;
4567  break;
4568  case MCK__35_3:
4569  ExpectedVal = 3;
4570  break;
4571  case MCK__35_32:
4572  ExpectedVal = 32;
4573  break;
4574  case MCK__35_4:
4575  ExpectedVal = 4;
4576  break;
4577  case MCK__35_48:
4578  ExpectedVal = 48;
4579  break;
4580  case MCK__35_6:
4581  ExpectedVal = 6;
4582  break;
4583  case MCK__35_64:
4584  ExpectedVal = 64;
4585  break;
4586  case MCK__35_8:
4587  ExpectedVal = 8;
4588  break;
4589  }
4590  if (!Op.isImm())
4591  return Match_InvalidOperand;
4592  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4593  if (!CE)
4594  return Match_InvalidOperand;
4595  if (CE->getValue() == ExpectedVal)
4596  return Match_Success;
4597  return Match_InvalidOperand;
4598 }
4599 
4600 OperandMatchResultTy
4601 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4602 
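  // This parses the consecutive even/odd GPR pairs used by instructions such
  // as (illustrative) "casp x0, x1, x2, x3, [x5]", where each pair must start
  // on an even-numbered register.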
4603  SMLoc S = getLoc();
4604 
4605  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4606  Error(S, "expected register");
4607  return MatchOperand_ParseFail;
4608  }
4609 
4610  int FirstReg = tryParseRegister();
4611  if (FirstReg == -1) {
4612  return MatchOperand_ParseFail;
4613  }
4614  const MCRegisterClass &WRegClass =
4615  AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4616  const MCRegisterClass &XRegClass =
4617  AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4618 
4619  bool isXReg = XRegClass.contains(FirstReg),
4620  isWReg = WRegClass.contains(FirstReg);
4621  if (!isXReg && !isWReg) {
4622  Error(S, "expected first even register of a "
4623  "consecutive same-size even/odd register pair");
4624  return MatchOperand_ParseFail;
4625  }
4626 
4627  const MCRegisterInfo *RI = getContext().getRegisterInfo();
4628  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4629 
4630  if (FirstEncoding & 0x1) {
4631  Error(S, "expected first even register of a "
4632  "consecutive same-size even/odd register pair");
4633  return MatchOperand_ParseFail;
4634  }
4635 
4636  SMLoc M = getLoc();
4637  if (getParser().getTok().isNot(AsmToken::Comma)) {
4638  Error(M, "expected comma");
4639  return MatchOperand_ParseFail;
4640  }
4641  // Eat the comma
4642  getParser().Lex();
4643 
4644  SMLoc E = getLoc();
4645  int SecondReg = tryParseRegister();
4646  if (SecondReg ==-1) {
4647  return MatchOperand_ParseFail;
4648  }
4649 
4650  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4651  (isXReg && !XRegClass.contains(SecondReg)) ||
4652  (isWReg && !WRegClass.contains(SecondReg))) {
4653  Error(E,"expected second odd register of a "
4654  "consecutive same-size even/odd register pair");
4655  return MatchOperand_ParseFail;
4656  }
4657 
4658  unsigned Pair = 0;
4659  if (isXReg) {
4660  Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4661  &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4662  } else {
4663  Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4664  &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4665  }
4666 
4667  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
4668  getLoc(), getContext()));
4669 
4670  return MatchOperand_Success;
4671 }
4672 
4673 template <bool ParseSuffix>
4674 OperandMatchResultTy
4675 AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
4676  const SMLoc S = getLoc();
4677  // Check for an SVE vector register specifier first.
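  // For example (illustrative): "z3.s" yields the Z3 register with a 32-bit
  // element width, while a bare "z3" is only accepted when ParseSuffix is
  // false.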
4678  int RegNum = -1;
4679  StringRef Kind;
4680 
4681  OperandMatchResultTy Res =
4682  tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
4683 
4684  if (Res != MatchOperand_Success)
4685  return Res;
4686 
4687  if (ParseSuffix && Kind.empty())
4688  return MatchOperand_NoMatch;
4689 
4690  unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
4691  .Case("", -1)
4692  .Case(".b", 8)
4693  .Case(".h", 16)
4694  .Case(".s", 32)
4695  .Case(".d", 64)
4696  .Case(".q", 128)
4697  .Default(0);
4698  if (!ElementWidth)
4699  return MatchOperand_NoMatch;
4700 
4701  Operands.push_back(
4702  AArch64Operand::CreateReg(RegNum, RegKind::SVEDataVector, ElementWidth,
4703  S, S, getContext()));
4704 
4705  return MatchOperand_Success;
4706 }
4707 
4708 OperandMatchResultTy
4709 AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
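  // A pattern operand may be spelled either as a named pattern (illustrative
  // names: "vl4", "mul3", "all") or as an immediate after '#'; both paths
  // below end up creating a single constant immediate operand.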
4710  MCAsmParser &Parser = getParser();
4711 
4712  SMLoc SS = getLoc();
4713  const AsmToken &TokE = Parser.getTok();
4714  bool IsHash = TokE.is(AsmToken::Hash);
4715 
4716  if (!IsHash && TokE.isNot(AsmToken::Identifier))
4717  return MatchOperand_NoMatch;
4718 
4719  int64_t Pattern;
4720  if (IsHash) {
4721  Parser.Lex(); // Eat hash
4722 
4723  // Parse the immediate operand.
4724  const MCExpr *ImmVal;
4725  SS = getLoc();
4726  if (Parser.parseExpression(ImmVal))
4727  return MatchOperand_ParseFail;
4728 
4729  auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4730  if (!MCE)
4731  return MatchOperand_ParseFail;
4732 
4733  Pattern = MCE->getValue();
4734  } else {
4735  // Parse the pattern
4736  auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
4737  if (!Pat)
4738  return MatchOperand_NoMatch;
4739 
4740  Parser.Lex();
4741  Pattern = Pat->Encoding;
4742  assert(Pattern >= 0 && Pattern < 32);
4743  }
4744 
4745  Operands.push_back(
4746  AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
4747  SS, getLoc(), getContext()));
4748 
4749  return MatchOperand_Success;
4750 }
static bool isValidVectorKind(StringRef Name)
static bool isReg(const MCInst &MI, unsigned OpNo)
Represents a range in source code.
Definition: SMLoc.h:49
void push_back(const T &Elt)
Definition: SmallVector.h:212
Target & getTheAArch64beTarget()
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static float getFPImmFloat(unsigned Imm)
LLVM_NODISCARD bool startswith_lower(StringRef Prefix) const
Check if this string starts with the given Prefix, ignoring case.
Definition: StringRef.cpp:47
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmLexer.h:116
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:313
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
VariantKind getKind() const
Definition: MCExpr.h:328
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
Definition: StringRef.h:176
static const AArch64MCExpr * create(const MCExpr *Expr, VariantKind Kind, MCContext &Ctx)
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:110
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Target & getTheAArch64leTarget()
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:137
MCTargetAsmParser - Generic interface to target specific assembly parsers.
static CondCode getInvertedCondCode(CondCode Code)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
Target specific streamer interface.
Definition: MCStreamer.h:81
bool isNot(TokenKind K) const
Definition: MCAsmLexer.h:89
const MCExpr * getLHS() const
Get the left-hand side expression of the binary operator.
Definition: MCExpr.h:554
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
static unsigned getXRegFromWReg(unsigned Reg)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
void changeSign()
Definition: APFloat.h:1050
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:33
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, bool PrintSchedInfo=false)
Emit the given Instruction into the current section.
Definition: MCStreamer.cpp:875
return AArch64::GPR64RegClass contains(Reg)
static SMLoc incrementLoc(SMLoc L, int Offset)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string...
Definition: MCAsmLexer.h:105
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:116
static ManagedStatic< DebugCounter > DC
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
bool contains(unsigned Reg) const
contains - Return true if the specified register is included in this register class.
const FeatureBitset & getFeatureBits() const
getFeatureBits - Return the feature bits.
RegisterKind
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:36
Reg
All possible values of the reg field in the ModR/M byte.
Target independent representation for an assembler token.
Definition: MCAsmLexer.h:27
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:165
bool getExtensionFeatures(unsigned Extensions, std::vector< StringRef > &Features)
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
Definition: StringExtras.h:349
Target & getTheARM64Target()
static bool isMem(const MachineInstr &MI, unsigned Op)
Definition: X86InstrInfo.h:144
zlib-gnu style compression
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand...
This file implements a class to represent arbitrary precision integral constant values and operations...
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:65
AArch64::ArchKind parseArch(StringRef Arch)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
Context object for machine code objects.
Definition: MCContext.h:61
std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \\\)
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool startswith(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:267
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch & Case(const char(&S)[N], const T &Value)
Definition: StringSwitch.h:74
RegisterMCAsmParser - Helper template for registering a target specific assembly parser, for use in the target machine initialization function.
const MCExpr * getRHS() const
Get the right-hand side expression of the binary operator.
Definition: MCExpr.h:557
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:133
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:26
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:918
const MCExpr * getExpr() const
Definition: MCInst.h:96
static const fltSemantics & IEEEdouble() LLVM_READNONE
Definition: APFloat.cpp:122
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:598
MCRegisterClass - Base class of TargetRegisterClass.
FeatureBitset getRequiredFeatures() const
const char * Name
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:159
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount. imm: 6-bit shift amount; shifter: 000 ==> lsl, 001 ==> lsr, 010 ==> asr, 011 ==> ror, 100 ==> msl.
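As an illustrative sketch, encoding and decoding an "lsl #12" shifter operand with the AArch64_AM helpers listed here:
  unsigned ShifterImm = AArch64_AM::getShifterImm(AArch64_AM::LSL, 12);
  unsigned Amount = AArch64_AM::getShiftValue(ShifterImm);                 // 12
  const char *ShiftName = AArch64_AM::getShiftExtendName(AArch64_AM::LSL); // "lsl"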
static unsigned getWRegFromXReg(unsigned Reg)
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
FeatureBitset ToggleFeature(uint64_t FB)
ToggleFeature - Toggle a feature and returns the re-computed feature bits.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
const char * getPointer() const
Definition: SMLoc.h:35
int64_t getValue() const
Definition: MCExpr.h:151
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
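A minimal sketch of a typical StringSwitch use, mapping a shift mnemonic to its enum value (ShiftName is a hypothetical StringRef):
  AArch64_AM::ShiftExtendType Op =
      StringSwitch<AArch64_AM::ShiftExtendType>(ShiftName.lower())
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Default(AArch64_AM::InvalidShiftExtend);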
Streaming machine code generation interface.
Definition: MCStreamer.h:181
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:248
Container class for subtarget features.
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
Definition: MathExtras.h:112
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg...
LLVM_NODISCARD StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition: StringRef.h:836
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftExtendName - Get the string encoding for the shift or extend type.
static const struct @343 ExtensionMap[]
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
static int MCLOHNameToId(StringRef Name)
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:24
static bool isValidSVEKind(StringRef Name)
virtual MCAsmLexer & getLexer()=0
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MCLOHType
Linker Optimization Hint Type.
bool isExpr() const
Definition: MCInst.h:61
int64_t getIntVal() const
Definition: MCAsmLexer.h:121
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition: MCAsmLexer.h:223
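A hedged sketch of typical token handling inside a tryParse routine, combining getTok, peekTok and is; the control flow is illustrative, not the parser's actual code:
  const AsmToken &Tok = getParser().getTok();          // current token, not consumed
  if (Tok.is(AsmToken::Identifier) &&
      getLexer().peekTok().is(AsmToken::Comma)) {      // look ahead without consuming
    StringRef Name = Tok.getIdentifier();
    getParser().Lex();                                 // consume the identifier
    (void)Name;
  }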
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition: APInt.h:443
Binary assembler expressions.
Definition: MCExpr.h:407
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(const T &Value) const
Definition: StringSwitch.h:244
const char * Name
std::enable_if< std::numeric_limits< T >::is_signed, bool >::type getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:497
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition: StringRef.h:645
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
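An illustrative sketch of checking and encoding a 64-bit logical immediate with these two helpers:
  uint64_t Imm = 0x00FF00FF00FF00FFULL;                // repeating bit pattern
  if (AArch64_AM::isLogicalImmediate(Imm, 64)) {
    uint64_t Enc = AArch64_AM::encodeLogicalImmediate(Imm, 64);
    // Enc is the N:immr:imms field used by AND/ORR/EOR (immediate).
    (void)Enc;
  }
  // Note: 0 and ~0ULL are not valid logical immediates and are rejected.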
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
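As a sketch, testing whether a double is representable as an 8-bit FMOV immediate, assuming the APInt overload of getFP64Imm:
  APInt Bits = APFloat(2.0).bitcastToAPInt();          // IEEE-754 bit pattern of 2.0
  int Enc = AArch64_AM::getFP64Imm(Bits);              // 8-bit encoding, or -1
  if (Enc != -1) {
    // 2.0 lies in the FMOV immediate range, so Enc holds its encoding.
  }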
MCStreamer & getStreamer()
Definition: MCStreamer.h:89
void setOpcode(unsigned Op)
Definition: MCInst.h:171
bool isSubRegisterEq(unsigned RegA, unsigned RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:862
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:180
LLVM_NODISCARD std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:727
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:710
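A hedged sketch of how these StringRef utilities can pick apart a vector register token such as "v0.4s" (token and variable names are illustrative; std::tie is from <tuple>):
  StringRef Tok("v0.4s");
  StringRef Name, Suffix;
  std::tie(Name, Suffix) = Tok.split('.');             // Name == "v0", Suffix == "4s"
  unsigned NumLanes = 0;
  if (Name.startswith("v") && Suffix.size() >= 2 &&
      !Suffix.slice(0, Suffix.size() - 1).getAsInteger(10, NumLanes)) {
    char ElementKind = Suffix.back();                  // 's' => 32-bit lanes
    (void)ElementKind;                                 // NumLanes == 4
  }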
static StringRef MCLOHDirectiveName()
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:531
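A small illustrative example of these MathExtras helpers:
  unsigned ShiftAmt = Log2_32(16);                     // 4
  std::size_t TZ = countTrailingZeros(0x80u);          // 7: bits 0..6 are zero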
StringMap - This is an unconventional map that is specialized for handling keys that are "strings"...
Definition: StringMap.h:222
bool is(TokenKind K) const
Definition: MCAsmLexer.h:88
Class for arbitrary precision integers.
Definition: APInt.h:69
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
const SysReg * lookupSysRegByName(StringRef)
Base class for user error types.
Definition: Error.h:331
uint32_t parseGenericRegister(StringRef Name)
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:37
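The file's incrementLoc helper is likely built from getPointer and getFromPointer along these lines (a sketch, not a verbatim copy):
  static SMLoc incrementLoc(SMLoc L, int Offset) {
    // Advance a source location by Offset characters within the same buffer.
    return SMLoc::getFromPointer(L.getPointer() + Offset);
  }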
bool haveFeatures(FeatureBitset ActiveFeatures) const
static bool isAdvSIMDModImmType10(uint64_t Imm)
uint16_t getEncodingValue(unsigned RegNo) const
Returns the encoding for RegNo.
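A hedged sketch of querying register-class membership and the hardware encoding through MCRegisterInfo; Ctx is assumed to be the active MCContext:
  const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
  const MCRegisterClass &GPR64 = MRI->getRegClass(AArch64::GPR64RegClassID);
  if (GPR64.contains(AArch64::X3)) {
    uint16_t Enc = MRI->getEncodingValue(AArch64::X3); // hardware register number 3
    (void)Enc;
  }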
StringRef getABIName() const
getABIName - If this returns a non-empty string this represents the textual name of the ABI that we w...
static const size_t npos
Definition: StringRef.h:51
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
static bool isValidMCLOHType(unsigned Kind)
MCSubtargetInfo - Generic base class for all target subtargets.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS, unsigned VariantID=0)
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition: APInt.h:449
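For illustration, checking immediate ranges with APInt::isIntN and isSignedIntN:
  APInt Imm(64, 255);
  bool FitsU8 = Imm.isIntN(8);                         // true: 0..255
  bool FitsS8 = Imm.isSignedIntN(8);                   // false: signed 8-bit is -128..127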
Opcode getOpcode() const
Get the kind of this binary expression.
Definition: MCExpr.h:551
const unsigned Kind
LLVM_NODISCARD std::string lower() const
Definition: StringRef.cpp:108
static int MCLOHIdToNbArgs(MCLOHType Kind)
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:290
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:235
const FeatureBitset Features
This class implements an extremely fast bulk output stream that can only output to a stream...
Definition: raw_ostream.h:44
Subtraction.
Definition: MCExpr.h:431
void addOperand(const MCOperand &Op)
Definition: MCInst.h:184
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:49
Represents a location in source code.
Definition: SMLoc.h:24
static const char * getSubtargetFeatureName(uint64_t Val)
unsigned getOpcode() const
Definition: MCInst.h:172
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:298
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:123
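A hedged sketch of assembling an MCInst by hand with setOpcode, addOperand, createReg and createImm, roughly the shape of "add x0, x1, #42" (the ADDXri operand order Rd, Rn, imm, shift is assumed):
  MCInst Inst;
  Inst.setOpcode(AArch64::ADDXri);
  Inst.addOperand(MCOperand::createReg(AArch64::X0));  // destination
  Inst.addOperand(MCOperand::createReg(AArch64::X1));  // source
  Inst.addOperand(MCOperand::createImm(42));           // immediate
  Inst.addOperand(MCOperand::createImm(
      AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))); // no shift
  // Out.EmitInstruction(Inst, getSTI()); would then hand it to the streamer.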
bool getArchFeatures(ArchKind AK, std::vector< StringRef > &Features)
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx)
Definition: MCExpr.cpp:159
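A short sketch of wrapping a constant in an expression operand; Ctx is assumed to be the active MCContext:
  const MCExpr *Val = MCConstantExpr::create(16, Ctx);
  MCOperand Op = MCOperand::createExpr(Val);           // expression operand holding 16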
static void parseValidVectorKind(StringRef Name, unsigned &NumElements, char &ElementKind)
void LLVMInitializeAArch64AsmParser()
Force static initialization.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
unsigned getDefaultExtensions(StringRef CPU, ArchKind AK)
void setDefaultFeatures(StringRef CPU, StringRef FS)
Set the features to the default for the given CPU with an appended feature string.