LLVM  7.0.0svn
AArch64AsmParser.cpp
Go to the documentation of this file.
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
14 #include "Utils/AArch64BaseInfo.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCStreamer.h"
38 #include "llvm/MC/MCSymbol.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/Compiler.h"
45 #include "llvm/Support/SMLoc.h"
49 #include <cassert>
50 #include <cctype>
51 #include <cstdint>
52 #include <cstdio>
53 #include <string>
54 #include <tuple>
55 #include <utility>
56 #include <vector>
57 
58 using namespace llvm;
59 
60 namespace {
61 
// The kinds of register operands the AArch64 assembly parser distinguishes.
enum class RegKind {
  Scalar,             // General purpose or FP/SIMD scalar register.
  NeonVector,         // NEON vector register (e.g. v0.8b).
  SVEDataVector,      // SVE data vector register (z0-z31).
  SVEPredicateVector  // SVE predicate register (p0-p15).
};
68 
69 class AArch64AsmParser : public MCTargetAsmParser {
70 private:
71  StringRef Mnemonic; ///< Instruction mnemonic.
72 
73  // Map of register aliases registers via the .req directive.
75 
76  AArch64TargetStreamer &getTargetStreamer() {
77  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
78  return static_cast<AArch64TargetStreamer &>(TS);
79  }
80 
81  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
82 
83  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
84  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
85  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
86  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
87  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
88  bool parseRegister(OperandVector &Operands);
89  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
90  bool parseNeonVectorList(OperandVector &Operands);
91  bool parseOptionalMulOperand(OperandVector &Operands);
92  bool parseOperand(OperandVector &Operands, bool isCondCode,
93  bool invertCondCode);
94 
95  bool showMatchError(SMLoc Loc, unsigned ErrCode, OperandVector &Operands);
96 
97  bool parseDirectiveArch(SMLoc L);
98  bool parseDirectiveCPU(SMLoc L);
99  bool parseDirectiveInst(SMLoc L);
100 
101  bool parseDirectiveTLSDescCall(SMLoc L);
102 
103  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
104  bool parseDirectiveLtorg(SMLoc L);
105 
106  bool parseDirectiveReq(StringRef Name, SMLoc L);
107  bool parseDirectiveUnreq(SMLoc L);
108 
109  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
110  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
111  OperandVector &Operands, MCStreamer &Out,
112  uint64_t &ErrorInfo,
113  bool MatchingInlineAsm) override;
114 /// @name Auto-generated Match Functions
115 /// {
116 
117 #define GET_ASSEMBLER_HEADER
118 #include "AArch64GenAsmMatcher.inc"
119 
120  /// }
121 
122  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
123  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
124  RegKind MatchKind);
125  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
126  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
127  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
128  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
129  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
130  template <bool IsSVEPrefetch = false>
131  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
132  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
133  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
134  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
135  template<bool AddFPZeroAsLiteral>
136  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
137  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
138  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
139  bool tryParseNeonVectorRegister(OperandVector &Operands);
140  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
141  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
142  template <bool ParseShiftExtend>
143  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
144  template <bool ParseShiftExtend, bool ParseSuffix>
145  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
146  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
147  template <RegKind VectorKind>
148  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
149  bool ExpectMatch = false);
150  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
151 
152 public:
153  enum AArch64MatchResultTy {
154  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
155 #define GET_OPERAND_DIAGNOSTIC_TYPES
156 #include "AArch64GenAsmMatcher.inc"
157  };
158  bool IsILP32;
159 
160  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
161  const MCInstrInfo &MII, const MCTargetOptions &Options)
162  : MCTargetAsmParser(Options, STI, MII) {
163  IsILP32 = Options.getABIName() == "ilp32";
165  MCStreamer &S = getParser().getStreamer();
166  if (S.getTargetStreamer() == nullptr)
167  new AArch64TargetStreamer(S);
168 
169  // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
170  // directives as they have the same form and semantics:
171  /// ::= (.hword | .word | .xword ) [ expression (, expression)* ]
172  Parser.addAliasForDirective(".hword", ".2byte");
173  Parser.addAliasForDirective(".word", ".4byte");
174  Parser.addAliasForDirective(".xword", ".8byte");
175 
176  // Initialize the set of available features.
177  setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
178  }
179 
180  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
181  SMLoc NameLoc, OperandVector &Operands) override;
182  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
183  bool ParseDirective(AsmToken DirectiveID) override;
184  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
185  unsigned Kind) override;
186 
187  static bool classifySymbolRef(const MCExpr *Expr,
188  AArch64MCExpr::VariantKind &ELFRefKind,
189  MCSymbolRefExpr::VariantKind &DarwinRefKind,
190  int64_t &Addend);
191 };
192 
193 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
194 /// instruction.
195 class AArch64Operand : public MCParsedAsmOperand {
196 private:
197  enum KindTy {
198  k_Immediate,
199  k_ShiftedImm,
200  k_CondCode,
201  k_Register,
202  k_VectorList,
203  k_VectorIndex,
204  k_Token,
205  k_SysReg,
206  k_SysCR,
207  k_Prefetch,
208  k_ShiftExtend,
209  k_FPImm,
210  k_Barrier,
211  k_PSBHint,
212  } Kind;
213 
214  SMLoc StartLoc, EndLoc;
215 
216  struct TokOp {
217  const char *Data;
218  unsigned Length;
219  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
220  };
221 
222  // Separate shift/extend operand.
223  struct ShiftExtendOp {
225  unsigned Amount;
226  bool HasExplicitAmount;
227  };
228 
229  struct RegOp {
230  unsigned RegNum;
231  RegKind Kind;
232  int ElementWidth;
233 
234  // In some cases the shift/extend needs to be explicitly parsed together
235  // with the register, rather than as a separate operand. This is needed
236  // for addressing modes where the instruction as a whole dictates the
237  // scaling/extend, rather than specific bits in the instruction.
238  // By parsing them as a single operand, we avoid the need to pass an
239  // extra operand in all CodeGen patterns (because all operands need to
240  // have an associated value), and we avoid the need to update TableGen to
241  // accept operands that have no associated bits in the instruction.
242  //
243  // An added benefit of parsing them together is that the assembler
244  // can give a sensible diagnostic if the scaling is not correct.
245  //
246  // The default is 'lsl #0' (HasExplicitAmount = false) if no
247  // ShiftExtend is specified.
248  ShiftExtendOp ShiftExtend;
249  };
250 
251  struct VectorListOp {
252  unsigned RegNum;
253  unsigned Count;
254  unsigned NumElements;
255  unsigned ElementWidth;
257  };
258 
259  struct VectorIndexOp {
260  unsigned Val;
261  };
262 
263  struct ImmOp {
264  const MCExpr *Val;
265  };
266 
267  struct ShiftedImmOp {
268  const MCExpr *Val;
269  unsigned ShiftAmount;
270  };
271 
272  struct CondCodeOp {
273  AArch64CC::CondCode Code;
274  };
275 
276  struct FPImmOp {
277  uint64_t Val; // APFloat value bitcasted to uint64_t.
278  bool IsExact; // describes whether parsed value was exact.
279  };
280 
281  struct BarrierOp {
282  const char *Data;
283  unsigned Length;
284  unsigned Val; // Not the enum since not all values have names.
285  };
286 
287  struct SysRegOp {
288  const char *Data;
289  unsigned Length;
290  uint32_t MRSReg;
291  uint32_t MSRReg;
292  uint32_t PStateField;
293  };
294 
295  struct SysCRImmOp {
296  unsigned Val;
297  };
298 
299  struct PrefetchOp {
300  const char *Data;
301  unsigned Length;
302  unsigned Val;
303  };
304 
305  struct PSBHintOp {
306  const char *Data;
307  unsigned Length;
308  unsigned Val;
309  };
310 
311  struct ExtendOp {
312  unsigned Val;
313  };
314 
315  union {
316  struct TokOp Tok;
317  struct RegOp Reg;
318  struct VectorListOp VectorList;
319  struct VectorIndexOp VectorIndex;
320  struct ImmOp Imm;
321  struct ShiftedImmOp ShiftedImm;
322  struct CondCodeOp CondCode;
323  struct FPImmOp FPImm;
324  struct BarrierOp Barrier;
325  struct SysRegOp SysReg;
326  struct SysCRImmOp SysCRImm;
327  struct PrefetchOp Prefetch;
328  struct PSBHintOp PSBHint;
329  struct ShiftExtendOp ShiftExtend;
330  };
331 
332  // Keep the MCContext around as the MCExprs may need manipulated during
333  // the add<>Operands() calls.
334  MCContext &Ctx;
335 
336 public:
337  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
338 
339  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
340  Kind = o.Kind;
341  StartLoc = o.StartLoc;
342  EndLoc = o.EndLoc;
343  switch (Kind) {
344  case k_Token:
345  Tok = o.Tok;
346  break;
347  case k_Immediate:
348  Imm = o.Imm;
349  break;
350  case k_ShiftedImm:
351  ShiftedImm = o.ShiftedImm;
352  break;
353  case k_CondCode:
354  CondCode = o.CondCode;
355  break;
356  case k_FPImm:
357  FPImm = o.FPImm;
358  break;
359  case k_Barrier:
360  Barrier = o.Barrier;
361  break;
362  case k_Register:
363  Reg = o.Reg;
364  break;
365  case k_VectorList:
366  VectorList = o.VectorList;
367  break;
368  case k_VectorIndex:
369  VectorIndex = o.VectorIndex;
370  break;
371  case k_SysReg:
372  SysReg = o.SysReg;
373  break;
374  case k_SysCR:
375  SysCRImm = o.SysCRImm;
376  break;
377  case k_Prefetch:
378  Prefetch = o.Prefetch;
379  break;
380  case k_PSBHint:
381  PSBHint = o.PSBHint;
382  break;
383  case k_ShiftExtend:
384  ShiftExtend = o.ShiftExtend;
385  break;
386  }
387  }
388 
389  /// getStartLoc - Get the location of the first token of this operand.
390  SMLoc getStartLoc() const override { return StartLoc; }
391  /// getEndLoc - Get the location of the last token of this operand.
392  SMLoc getEndLoc() const override { return EndLoc; }
393 
394  StringRef getToken() const {
395  assert(Kind == k_Token && "Invalid access!");
396  return StringRef(Tok.Data, Tok.Length);
397  }
398 
399  bool isTokenSuffix() const {
400  assert(Kind == k_Token && "Invalid access!");
401  return Tok.IsSuffix;
402  }
403 
404  const MCExpr *getImm() const {
405  assert(Kind == k_Immediate && "Invalid access!");
406  return Imm.Val;
407  }
408 
409  const MCExpr *getShiftedImmVal() const {
410  assert(Kind == k_ShiftedImm && "Invalid access!");
411  return ShiftedImm.Val;
412  }
413 
414  unsigned getShiftedImmShift() const {
415  assert(Kind == k_ShiftedImm && "Invalid access!");
416  return ShiftedImm.ShiftAmount;
417  }
418 
419  AArch64CC::CondCode getCondCode() const {
420  assert(Kind == k_CondCode && "Invalid access!");
421  return CondCode.Code;
422  }
423 
424  APFloat getFPImm() const {
425  assert (Kind == k_FPImm && "Invalid access!");
426  return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
427  }
428 
429  bool getFPImmIsExact() const {
430  assert (Kind == k_FPImm && "Invalid access!");
431  return FPImm.IsExact;
432  }
433 
434  unsigned getBarrier() const {
435  assert(Kind == k_Barrier && "Invalid access!");
436  return Barrier.Val;
437  }
438 
439  StringRef getBarrierName() const {
440  assert(Kind == k_Barrier && "Invalid access!");
441  return StringRef(Barrier.Data, Barrier.Length);
442  }
443 
444  unsigned getReg() const override {
445  assert(Kind == k_Register && "Invalid access!");
446  return Reg.RegNum;
447  }
448 
449  unsigned getVectorListStart() const {
450  assert(Kind == k_VectorList && "Invalid access!");
451  return VectorList.RegNum;
452  }
453 
454  unsigned getVectorListCount() const {
455  assert(Kind == k_VectorList && "Invalid access!");
456  return VectorList.Count;
457  }
458 
459  unsigned getVectorIndex() const {
460  assert(Kind == k_VectorIndex && "Invalid access!");
461  return VectorIndex.Val;
462  }
463 
464  StringRef getSysReg() const {
465  assert(Kind == k_SysReg && "Invalid access!");
466  return StringRef(SysReg.Data, SysReg.Length);
467  }
468 
469  unsigned getSysCR() const {
470  assert(Kind == k_SysCR && "Invalid access!");
471  return SysCRImm.Val;
472  }
473 
474  unsigned getPrefetch() const {
475  assert(Kind == k_Prefetch && "Invalid access!");
476  return Prefetch.Val;
477  }
478 
479  unsigned getPSBHint() const {
480  assert(Kind == k_PSBHint && "Invalid access!");
481  return PSBHint.Val;
482  }
483 
484  StringRef getPSBHintName() const {
485  assert(Kind == k_PSBHint && "Invalid access!");
486  return StringRef(PSBHint.Data, PSBHint.Length);
487  }
488 
489  StringRef getPrefetchName() const {
490  assert(Kind == k_Prefetch && "Invalid access!");
491  return StringRef(Prefetch.Data, Prefetch.Length);
492  }
493 
494  AArch64_AM::ShiftExtendType getShiftExtendType() const {
495  if (Kind == k_ShiftExtend)
496  return ShiftExtend.Type;
497  if (Kind == k_Register)
498  return Reg.ShiftExtend.Type;
499  llvm_unreachable("Invalid access!");
500  }
501 
502  unsigned getShiftExtendAmount() const {
503  if (Kind == k_ShiftExtend)
504  return ShiftExtend.Amount;
505  if (Kind == k_Register)
506  return Reg.ShiftExtend.Amount;
507  llvm_unreachable("Invalid access!");
508  }
509 
510  bool hasShiftExtendAmount() const {
511  if (Kind == k_ShiftExtend)
512  return ShiftExtend.HasExplicitAmount;
513  if (Kind == k_Register)
514  return Reg.ShiftExtend.HasExplicitAmount;
515  llvm_unreachable("Invalid access!");
516  }
517 
518  bool isImm() const override { return Kind == k_Immediate; }
519  bool isMem() const override { return false; }
520 
521  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
522 
523  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
524  return isImmScaled<Bits, Scale>(true);
525  }
526 
527  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
528  return isImmScaled<Bits, Scale>(false);
529  }
530 
531  template <int Bits, int Scale>
532  DiagnosticPredicate isImmScaled(bool Signed) const {
533  if (!isImm())
535 
536  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
537  if (!MCE)
539 
540  int64_t MinVal, MaxVal;
541  if (Signed) {
542  int64_t Shift = Bits - 1;
543  MinVal = (int64_t(1) << Shift) * -Scale;
544  MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
545  } else {
546  MinVal = 0;
547  MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
548  }
549 
550  int64_t Val = MCE->getValue();
551  if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
553 
555  }
556 
557  bool isSVEPattern() const {
558  if (!isImm())
559  return false;
560  auto *MCE = dyn_cast<MCConstantExpr>(getImm());
561  if (!MCE)
562  return false;
563  int64_t Val = MCE->getValue();
564  return Val >= 0 && Val < 32;
565  }
566 
567  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
568  AArch64MCExpr::VariantKind ELFRefKind;
569  MCSymbolRefExpr::VariantKind DarwinRefKind;
570  int64_t Addend;
571  if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
572  Addend)) {
573  // If we don't understand the expression, assume the best and
574  // let the fixup and relocation code deal with it.
575  return true;
576  }
577 
578  if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
579  ELFRefKind == AArch64MCExpr::VK_LO12 ||
580  ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
581  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
582  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
583  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
584  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
585  ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
586  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
587  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
588  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
589  // Note that we don't range-check the addend. It's adjusted modulo page
590  // size when converted, so there is no "out of range" condition when using
591  // @pageoff.
592  return Addend >= 0 && (Addend % Scale) == 0;
593  } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
594  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
595  // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
596  return Addend == 0;
597  }
598 
599  return false;
600  }
601 
602  template <int Scale> bool isUImm12Offset() const {
603  if (!isImm())
604  return false;
605 
606  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
607  if (!MCE)
608  return isSymbolicUImm12Offset(getImm(), Scale);
609 
610  int64_t Val = MCE->getValue();
611  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
612  }
613 
614  template <int N, int M>
615  bool isImmInRange() const {
616  if (!isImm())
617  return false;
618  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
619  if (!MCE)
620  return false;
621  int64_t Val = MCE->getValue();
622  return (Val >= N && Val <= M);
623  }
624 
625  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
626  // a logical immediate can always be represented when inverted.
627  template <typename T>
628  bool isLogicalImm() const {
629  if (!isImm())
630  return false;
631  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
632  if (!MCE)
633  return false;
634 
635  int64_t Val = MCE->getValue();
636  int64_t SVal = typename std::make_signed<T>::type(Val);
637  int64_t UVal = typename std::make_unsigned<T>::type(Val);
638  if (Val != SVal && Val != UVal)
639  return false;
640 
641  return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
642  }
643 
644  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
645 
646  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
647  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
648  /// immediate that can be shifted by 'Shift'.
649  template <unsigned Width>
650  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
651  if (isShiftedImm() && Width == getShiftedImmShift())
652  if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
653  return std::make_pair(CE->getValue(), Width);
654 
655  if (isImm())
656  if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
657  int64_t Val = CE->getValue();
658  if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
659  return std::make_pair(Val >> Width, Width);
660  else
661  return std::make_pair(Val, 0u);
662  }
663 
664  return {};
665  }
666 
667  bool isAddSubImm() const {
668  if (!isShiftedImm() && !isImm())
669  return false;
670 
671  const MCExpr *Expr;
672 
673  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
674  if (isShiftedImm()) {
675  unsigned Shift = ShiftedImm.ShiftAmount;
676  Expr = ShiftedImm.Val;
677  if (Shift != 0 && Shift != 12)
678  return false;
679  } else {
680  Expr = getImm();
681  }
682 
683  AArch64MCExpr::VariantKind ELFRefKind;
684  MCSymbolRefExpr::VariantKind DarwinRefKind;
685  int64_t Addend;
686  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
687  DarwinRefKind, Addend)) {
688  return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
689  || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
690  || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
691  || ELFRefKind == AArch64MCExpr::VK_LO12
692  || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
693  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
694  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
695  || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
696  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
697  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
698  || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
699  || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
700  || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
701  }
702 
703  // If it's a constant, it should be a real immediate in range.
704  if (auto ShiftedVal = getShiftedVal<12>())
705  return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
706 
707  // If it's an expression, we hope for the best and let the fixup/relocation
708  // code deal with it.
709  return true;
710  }
711 
712  bool isAddSubImmNeg() const {
713  if (!isShiftedImm() && !isImm())
714  return false;
715 
716  // Otherwise it should be a real negative immediate in range.
717  if (auto ShiftedVal = getShiftedVal<12>())
718  return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
719 
720  return false;
721  }
722 
723  // Signed value in the range -128 to +127. For element widths of
724  // 16 bits or higher it may also be a signed multiple of 256 in the
725  // range -32768 to +32512.
726  // For element-width of 8 bits a range of -128 to 255 is accepted,
727  // since a copy of a byte can be either signed/unsigned.
728  template <typename T>
730  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
732 
733  bool IsByte =
734  std::is_same<int8_t, typename std::make_signed<T>::type>::value;
735  if (auto ShiftedImm = getShiftedVal<8>())
736  if (!(IsByte && ShiftedImm->second) &&
737  AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
738  << ShiftedImm->second))
740 
742  }
743 
744  // Unsigned value in the range 0 to 255. For element widths of
745  // 16 bits or higher it may also be a signed multiple of 256 in the
746  // range 0 to 65280.
747  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
748  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
750 
751  bool IsByte =
752  std::is_same<int8_t, typename std::make_signed<T>::type>::value;
753  if (auto ShiftedImm = getShiftedVal<8>())
754  if (!(IsByte && ShiftedImm->second) &&
755  AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
756  << ShiftedImm->second))
758 
760  }
761 
762  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
763  if (isLogicalImm<T>() && !isSVECpyImm<T>())
766  }
767 
768  bool isCondCode() const { return Kind == k_CondCode; }
769 
770  bool isSIMDImmType10() const {
771  if (!isImm())
772  return false;
773  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
774  if (!MCE)
775  return false;
777  }
778 
779  template<int N>
780  bool isBranchTarget() const {
781  if (!isImm())
782  return false;
783  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
784  if (!MCE)
785  return true;
786  int64_t Val = MCE->getValue();
787  if (Val & 0x3)
788  return false;
789  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
790  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
791  }
792 
793  bool
794  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
795  if (!isImm())
796  return false;
797 
798  AArch64MCExpr::VariantKind ELFRefKind;
799  MCSymbolRefExpr::VariantKind DarwinRefKind;
800  int64_t Addend;
801  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
802  DarwinRefKind, Addend)) {
803  return false;
804  }
805  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
806  return false;
807 
808  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
809  if (ELFRefKind == AllowedModifiers[i])
810  return Addend == 0;
811  }
812 
813  return false;
814  }
815 
816  bool isMovZSymbolG3() const {
817  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
818  }
819 
820  bool isMovZSymbolG2() const {
824  }
825 
826  bool isMovZSymbolG1() const {
827  return isMovWSymbol({
831  });
832  }
833 
834  bool isMovZSymbolG0() const {
838  }
839 
840  bool isMovKSymbolG3() const {
841  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
842  }
843 
844  bool isMovKSymbolG2() const {
845  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
846  }
847 
848  bool isMovKSymbolG1() const {
849  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
852  }
853 
854  bool isMovKSymbolG0() const {
855  return isMovWSymbol(
858  }
859 
860  template<int RegWidth, int Shift>
861  bool isMOVZMovAlias() const {
862  if (!isImm()) return false;
863 
864  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
865  if (!CE) return false;
866  uint64_t Value = CE->getValue();
867 
868  return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
869  }
870 
871  template<int RegWidth, int Shift>
872  bool isMOVNMovAlias() const {
873  if (!isImm()) return false;
874 
875  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
876  if (!CE) return false;
877  uint64_t Value = CE->getValue();
878 
879  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
880  }
881 
882  bool isFPImm() const {
883  return Kind == k_FPImm &&
884  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
885  }
886 
887  bool isBarrier() const { return Kind == k_Barrier; }
888  bool isSysReg() const { return Kind == k_SysReg; }
889 
890  bool isMRSSystemRegister() const {
891  if (!isSysReg()) return false;
892 
893  return SysReg.MRSReg != -1U;
894  }
895 
896  bool isMSRSystemRegister() const {
897  if (!isSysReg()) return false;
898  return SysReg.MSRReg != -1U;
899  }
900 
901  bool isSystemPStateFieldWithImm0_1() const {
902  if (!isSysReg()) return false;
903  return (SysReg.PStateField == AArch64PState::PAN ||
904  SysReg.PStateField == AArch64PState::UAO);
905  }
906 
907  bool isSystemPStateFieldWithImm0_15() const {
908  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
909  return SysReg.PStateField != -1U;
910  }
911 
912  bool isReg() const override {
913  return Kind == k_Register;
914  }
915 
916  bool isScalarReg() const {
917  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
918  }
919 
920  bool isNeonVectorReg() const {
921  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
922  }
923 
924  bool isNeonVectorRegLo() const {
925  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
926  AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
927  Reg.RegNum);
928  }
929 
930  template <unsigned Class> bool isSVEVectorReg() const {
931  RegKind RK;
932  switch (Class) {
933  case AArch64::ZPRRegClassID:
934  RK = RegKind::SVEDataVector;
935  break;
936  case AArch64::PPRRegClassID:
937  case AArch64::PPR_3bRegClassID:
938  RK = RegKind::SVEPredicateVector;
939  break;
940  default:
941  llvm_unreachable("Unsupport register class");
942  }
943 
944  return (Kind == k_Register && Reg.Kind == RK) &&
945  AArch64MCRegisterClasses[Class].contains(getReg());
946  }
947 
948  template <unsigned Class> bool isFPRasZPR() const {
949  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
950  AArch64MCRegisterClasses[Class].contains(getReg());
951  }
952 
953  template <int ElementWidth, unsigned Class>
954  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
955  if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
957 
958  if (isSVEVectorReg<Class>() &&
959  (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
961 
963  }
964 
965  template <int ElementWidth, unsigned Class>
966  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
967  if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
969 
970  if (isSVEVectorReg<Class>() &&
971  (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
973 
975  }
976 
977  template <int ElementWidth, unsigned Class,
978  AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
979  bool ShiftWidthAlwaysSame>
980  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
981  auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
982  if (!VectorMatch.isMatch())
984 
985  // Give a more specific diagnostic when the user has explicitly typed in
986  // a shift-amount that does not match what is expected, but for which
987  // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
988  bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
989  if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
990  ShiftExtendTy == AArch64_AM::SXTW) &&
991  !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
993 
994  if (MatchShift && ShiftExtendTy == getShiftExtendType())
996 
998  }
999 
1000  bool isGPR32as64() const {
1001  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1002  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1003  }
1004 
1005  bool isWSeqPair() const {
1006  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1007  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1008  Reg.RegNum);
1009  }
1010 
1011  bool isXSeqPair() const {
1012  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1013  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1014  Reg.RegNum);
1015  }
1016 
1017  template<int64_t Angle, int64_t Remainder>
1018  bool isComplexRotation() const {
1019  if (!isImm()) return false;
1020 
1021  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1022  if (!CE) return false;
1023  uint64_t Value = CE->getValue();
1024 
1025  return (Value % Angle == Remainder && Value <= 270);
1026  }
1027 
1028  template <unsigned RegClassID> bool isGPR64() const {
1029  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1030  AArch64MCRegisterClasses[RegClassID].contains(getReg());
1031  }
1032 
1033  template <unsigned RegClassID, int ExtWidth>
1034  DiagnosticPredicate isGPR64WithShiftExtend() const {
1035  if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1037 
1038  if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1039  getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1042  }
1043 
1044  /// Is this a vector list with the type implicit (presumably attached to the
1045  /// instruction itself)?
1046  template <RegKind VectorKind, unsigned NumRegs>
1047  bool isImplicitlyTypedVectorList() const {
1048  return Kind == k_VectorList && VectorList.Count == NumRegs &&
1049  VectorList.NumElements == 0 &&
1050  VectorList.RegisterKind == VectorKind;
1051  }
1052 
1053  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1054  unsigned ElementWidth>
1055  bool isTypedVectorList() const {
1056  if (Kind != k_VectorList)
1057  return false;
1058  if (VectorList.Count != NumRegs)
1059  return false;
1060  if (VectorList.RegisterKind != VectorKind)
1061  return false;
1062  if (VectorList.ElementWidth != ElementWidth)
1063  return false;
1064  return VectorList.NumElements == NumElements;
1065  }
1066 
1067  template <int Min, int Max>
1068  DiagnosticPredicate isVectorIndex() const {
1069  if (Kind != k_VectorIndex)
1071  if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1074  }
1075 
  // Trivial kind-discriminator predicates used by the generated matcher.
  bool isToken() const override { return Kind == k_Token; }

  // True iff this is a token operand whose text is exactly Str.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1085  bool isShifter() const {
1086  if (!isShiftExtend())
1087  return false;
1088 
1089  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1090  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1091  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1092  ST == AArch64_AM::MSL);
1093  }
1094 
1095  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1096  if (Kind != k_FPImm)
1098 
1099  if (getFPImmIsExact()) {
1100  // Lookup the immediate from table of supported immediates.
1101  auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1102  assert(Desc && "Unknown enum value");
1103 
1104  // Calculate its FP value.
1105  APFloat RealVal(APFloat::IEEEdouble());
1106  if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
1107  APFloat::opOK)
1108  llvm_unreachable("FP immediate is not exact");
1109 
1110  if (getFPImm().bitwiseIsEqual(RealVal))
1112  }
1113 
1115  }
1116 
1117  template <unsigned ImmA, unsigned ImmB>
1118  DiagnosticPredicate isExactFPImm() const {
1120  if ((Res = isExactFPImm<ImmA>()))
1122  if ((Res = isExactFPImm<ImmB>()))
1124  return Res;
1125  }
1126 
1127  bool isExtend() const {
1128  if (!isShiftExtend())
1129  return false;
1130 
1131  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1132  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1133  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1134  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1135  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1136  ET == AArch64_AM::LSL) &&
1137  getShiftExtendAmount() <= 4;
1138  }
1139 
1140  bool isExtend64() const {
1141  if (!isExtend())
1142  return false;
1143  // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1144  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1145  return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1146  }
1147 
1148  bool isExtendLSL64() const {
1149  if (!isExtend())
1150  return false;
1151  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1152  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1153  ET == AArch64_AM::LSL) &&
1154  getShiftExtendAmount() <= 4;
1155  }
1156 
1157  template<int Width> bool isMemXExtend() const {
1158  if (!isExtend())
1159  return false;
1160  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1161  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1162  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1163  getShiftExtendAmount() == 0);
1164  }
1165 
1166  template<int Width> bool isMemWExtend() const {
1167  if (!isExtend())
1168  return false;
1169  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1170  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1171  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1172  getShiftExtendAmount() == 0);
1173  }
1174 
1175  template <unsigned width>
1176  bool isArithmeticShifter() const {
1177  if (!isShifter())
1178  return false;
1179 
1180  // An arithmetic shifter is LSL, LSR, or ASR.
1181  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1182  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1183  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1184  }
1185 
1186  template <unsigned width>
1187  bool isLogicalShifter() const {
1188  if (!isShifter())
1189  return false;
1190 
1191  // A logical shifter is LSL, LSR, ASR or ROR.
1192  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1193  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1194  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1195  getShiftExtendAmount() < width;
1196  }
1197 
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (The original comment listed the 64-bit amounts; the code below only
    // accepts 0 and 16, matching the two halfword positions of a W register.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1209 
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    // (The original comment listed the 32-bit amounts; the code below accepts
    // all four halfword positions of an X register.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1221 
1222  bool isLogicalVecShifter() const {
1223  if (!isShifter())
1224  return false;
1225 
1226  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1227  unsigned Shift = getShiftExtendAmount();
1228  return getShiftExtendType() == AArch64_AM::LSL &&
1229  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1230  }
1231 
1232  bool isLogicalVecHalfWordShifter() const {
1233  if (!isLogicalVecShifter())
1234  return false;
1235 
1236  // A logical vector shifter is a left shift by 0 or 8.
1237  unsigned Shift = getShiftExtendAmount();
1238  return getShiftExtendType() == AArch64_AM::LSL &&
1239  (Shift == 0 || Shift == 8);
1240  }
1241 
1242  bool isMoveVecShifter() const {
1243  if (!isShiftExtend())
1244  return false;
1245 
1246  // A logical vector shifter is a left shift by 8 or 16.
1247  unsigned Shift = getShiftExtendAmount();
1248  return getShiftExtendType() == AArch64_AM::MSL &&
1249  (Shift == 8 || Shift == 16);
1250  }
1251 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  // Width is the access size in bits; the scaled form uses byte units.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1261 
1262  bool isAdrpLabel() const {
1263  // Validation was handled during parsing, so we just sanity check that
1264  // something didn't go haywire.
1265  if (!isImm())
1266  return false;
1267 
1268  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1269  int64_t Val = CE->getValue();
1270  int64_t Min = - (4096 * (1LL << (21 - 1)));
1271  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1272  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1273  }
1274 
1275  return true;
1276  }
1277 
1278  bool isAdrLabel() const {
1279  // Validation was handled during parsing, so we just sanity check that
1280  // something didn't go haywire.
1281  if (!isImm())
1282  return false;
1283 
1284  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1285  int64_t Val = CE->getValue();
1286  int64_t Min = - (1LL << (21 - 1));
1287  int64_t Max = ((1LL << (21 - 1)) - 1);
1288  return Val >= Min && Val <= Max;
1289  }
1290 
1291  return true;
1292  }
1293 
1294  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1295  // Add as immediates when possible. Null MCExpr = 0.
1296  if (!Expr)
1298  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1299  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1300  else
1301  Inst.addOperand(MCOperand::createExpr(Expr));
1302  }
1303 
1304  void addRegOperands(MCInst &Inst, unsigned N) const {
1305  assert(N == 1 && "Invalid number of operands!");
1307  }
1308 
1309  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1310  assert(N == 1 && "Invalid number of operands!");
1311  assert(
1312  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1313 
1314  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1315  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1316  RI->getEncodingValue(getReg()));
1317 
1318  Inst.addOperand(MCOperand::createReg(Reg));
1319  }
1320 
1321  template <int Width>
1322  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1323  unsigned Base;
1324  switch (Width) {
1325  case 8: Base = AArch64::B0; break;
1326  case 16: Base = AArch64::H0; break;
1327  case 32: Base = AArch64::S0; break;
1328  case 64: Base = AArch64::D0; break;
1329  case 128: Base = AArch64::Q0; break;
1330  default:
1331  llvm_unreachable("Unsupported width");
1332  }
1333  Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1334  }
1335 
1336  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1337  assert(N == 1 && "Invalid number of operands!");
1338  assert(
1339  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1340  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1341  }
1342 
1343  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1344  assert(N == 1 && "Invalid number of operands!");
1345  assert(
1346  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1348  }
1349 
1350  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1351  assert(N == 1 && "Invalid number of operands!");
1353  }
1354 
  // Selects which row of the FirstRegs table in addVectorListOperands is
  // used when translating a vector list's start register.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };
1360 
1361  template <VecListIndexType RegTy, unsigned NumRegs>
1362  void addVectorListOperands(MCInst &Inst, unsigned N) const {
1363  assert(N == 1 && "Invalid number of operands!");
1364  static const unsigned FirstRegs[][5] = {
1365  /* DReg */ { AArch64::Q0,
1366  AArch64::D0, AArch64::D0_D1,
1367  AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1368  /* QReg */ { AArch64::Q0,
1369  AArch64::Q0, AArch64::Q0_Q1,
1370  AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1371  /* ZReg */ { AArch64::Z0,
1372  AArch64::Z0, AArch64::Z0_Z1,
1373  AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1374  };
1375 
1376  assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1377  " NumRegs must be <= 4 for ZRegs");
1378 
1379  unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1380  Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1381  FirstRegs[(unsigned)RegTy][0]));
1382  }
1383 
1384  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1385  assert(N == 1 && "Invalid number of operands!");
1386  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1387  }
1388 
1389  template <unsigned ImmIs0, unsigned ImmIs1>
1390  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1391  assert(N == 1 && "Invalid number of operands!");
1392  assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1393  Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1394  }
1395 
  // Append the immediate expression via addExpr (constants fold to imms).
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1403 
1404  template <int Shift>
1405  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1406  assert(N == 2 && "Invalid number of operands!");
1407  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1408  Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1409  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1410  } else if (isShiftedImm()) {
1411  addExpr(Inst, getShiftedImmVal());
1412  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1413  } else {
1414  addExpr(Inst, getImm());
1416  }
1417  }
1418 
1419  template <int Shift>
1420  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1421  assert(N == 2 && "Invalid number of operands!");
1422  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1423  Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1424  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1425  } else
1426  llvm_unreachable("Not a shifted negative immediate");
1427  }
1428 
  // Condition codes are encoded as plain immediates.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  // ADRP target: constant offsets are emitted in 4K pages (low 12 bits
  // dropped); non-constant expressions are left for the fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  // ADR target is a plain byte offset; reuse the generic immediate path.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1446 
1447  template<int Scale>
1448  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1449  assert(N == 1 && "Invalid number of operands!");
1450  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1451 
1452  if (!MCE) {
1453  Inst.addOperand(MCOperand::createExpr(getImm()));
1454  return;
1455  }
1456  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1457  }
1458 
1459  template <int Scale>
1460  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1461  assert(N == 1 && "Invalid number of operands!");
1462  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1463  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1464  }
1465 
1466  template <typename T>
1467  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1468  assert(N == 1 && "Invalid number of operands!");
1469  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1470  typename std::make_unsigned<T>::type Val = MCE->getValue();
1471  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1472  Inst.addOperand(MCOperand::createImm(encoding));
1473  }
1474 
1475  template <typename T>
1476  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1477  assert(N == 1 && "Invalid number of operands!");
1478  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1479  typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1480  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1481  Inst.addOperand(MCOperand::createImm(encoding));
1482  }
1483 
1484  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1485  assert(N == 1 && "Invalid number of operands!");
1486  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1487  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1488  Inst.addOperand(MCOperand::createImm(encoding));
1489  }
1490 
  // The three branch-target adders below share one pattern: constant
  // targets drop the two low (always-zero) bits; symbolic targets are
  // emitted as expressions for later fixup. Note the assert(MCE && ...)
  // after the early return is unreachable and purely documentary.
  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
1532 
1533  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1534  assert(N == 1 && "Invalid number of operands!");
1536  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1537  }
1538 
  // Barrier option (dsb/dmb/isb operand) as a plain immediate.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }

  // System register encodings differ between MRS (read) and MSR (write);
  // the operand carries both and the caller picks the right one.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }

  // MSR (immediate) pstate fields; the 0_1 and 0_15 variants differ only
  // in which matcher class they serve — both emit the pstate field.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  // SYS/SYSL Cn/Cm field.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }

  // PRFM prefetch operation.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }

  // PSB hint (statistical profiling).
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1582 
  // Pack (shift type, amount) into the single shifter-operand immediate.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // Arithmetic extend for a 32-bit source: a bare LSL is canonicalized to
  // UXTW before encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // Arithmetic extend for a 64-bit source: a bare LSL is canonicalized to
  // UXTX before encoding.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1605 
  // Register-offset addressing: emit (is-signed-extend, is-shifted) flags.
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }

  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
  }
1625 
1626  template<int Shift>
1627  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1628  assert(N == 1 && "Invalid number of operands!");
1629 
1630  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1631  uint64_t Value = CE->getValue();
1632  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1633  }
1634 
1635  template<int Shift>
1636  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1637  assert(N == 1 && "Invalid number of operands!");
1638 
1639  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1640  uint64_t Value = CE->getValue();
1641  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1642  }
1643 
1644  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1645  assert(N == 1 && "Invalid number of operands!");
1646  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1647  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1648  }
1649 
1650  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1651  assert(N == 1 && "Invalid number of operands!");
1652  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1653  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1654  }
1655 
1656  void print(raw_ostream &OS) const override;
1657 
1658  static std::unique_ptr<AArch64Operand>
1659  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1660  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1661  Op->Tok.Data = Str.data();
1662  Op->Tok.Length = Str.size();
1663  Op->Tok.IsSuffix = IsSuffix;
1664  Op->StartLoc = S;
1665  Op->EndLoc = S;
1666  return Op;
1667  }
1668 
1669  static std::unique_ptr<AArch64Operand>
1670  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1672  unsigned ShiftAmount = 0,
1673  unsigned HasExplicitAmount = false) {
1674  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1675  Op->Reg.RegNum = RegNum;
1676  Op->Reg.Kind = Kind;
1677  Op->Reg.ElementWidth = 0;
1678  Op->Reg.ShiftExtend.Type = ExtTy;
1679  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1680  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1681  Op->StartLoc = S;
1682  Op->EndLoc = E;
1683  return Op;
1684  }
1685 
1686  static std::unique_ptr<AArch64Operand>
1687  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1688  SMLoc S, SMLoc E, MCContext &Ctx,
1690  unsigned ShiftAmount = 0,
1691  unsigned HasExplicitAmount = false) {
1692  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1693  Kind == RegKind::SVEPredicateVector) &&
1694  "Invalid vector kind");
1695  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, ExtTy, ShiftAmount,
1696  HasExplicitAmount);
1697  Op->Reg.ElementWidth = ElementWidth;
1698  return Op;
1699  }
1700 
1701  static std::unique_ptr<AArch64Operand>
1702  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1703  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1704  MCContext &Ctx) {
1705  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1706  Op->VectorList.RegNum = RegNum;
1707  Op->VectorList.Count = Count;
1708  Op->VectorList.NumElements = NumElements;
1709  Op->VectorList.ElementWidth = ElementWidth;
1710  Op->VectorList.RegisterKind = RegisterKind;
1711  Op->StartLoc = S;
1712  Op->EndLoc = E;
1713  return Op;
1714  }
1715 
1716  static std::unique_ptr<AArch64Operand>
1717  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1718  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1719  Op->VectorIndex.Val = Idx;
1720  Op->StartLoc = S;
1721  Op->EndLoc = E;
1722  return Op;
1723  }
1724 
1725  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1726  SMLoc E, MCContext &Ctx) {
1727  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1728  Op->Imm.Val = Val;
1729  Op->StartLoc = S;
1730  Op->EndLoc = E;
1731  return Op;
1732  }
1733 
1734  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1735  unsigned ShiftAmount,
1736  SMLoc S, SMLoc E,
1737  MCContext &Ctx) {
1738  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1739  Op->ShiftedImm .Val = Val;
1740  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1741  Op->StartLoc = S;
1742  Op->EndLoc = E;
1743  return Op;
1744  }
1745 
1746  static std::unique_ptr<AArch64Operand>
1747  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1748  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1749  Op->CondCode.Code = Code;
1750  Op->StartLoc = S;
1751  Op->EndLoc = E;
1752  return Op;
1753  }
1754 
1755  static std::unique_ptr<AArch64Operand>
1756  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1757  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1758  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1759  Op->FPImm.IsExact = IsExact;
1760  Op->StartLoc = S;
1761  Op->EndLoc = S;
1762  return Op;
1763  }
1764 
1765  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1766  StringRef Str,
1767  SMLoc S,
1768  MCContext &Ctx) {
1769  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1770  Op->Barrier.Val = Val;
1771  Op->Barrier.Data = Str.data();
1772  Op->Barrier.Length = Str.size();
1773  Op->StartLoc = S;
1774  Op->EndLoc = S;
1775  return Op;
1776  }
1777 
1778  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1779  uint32_t MRSReg,
1780  uint32_t MSRReg,
1781  uint32_t PStateField,
1782  MCContext &Ctx) {
1783  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1784  Op->SysReg.Data = Str.data();
1785  Op->SysReg.Length = Str.size();
1786  Op->SysReg.MRSReg = MRSReg;
1787  Op->SysReg.MSRReg = MSRReg;
1788  Op->SysReg.PStateField = PStateField;
1789  Op->StartLoc = S;
1790  Op->EndLoc = S;
1791  return Op;
1792  }
1793 
1794  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1795  SMLoc E, MCContext &Ctx) {
1796  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1797  Op->SysCRImm.Val = Val;
1798  Op->StartLoc = S;
1799  Op->EndLoc = E;
1800  return Op;
1801  }
1802 
1803  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1804  StringRef Str,
1805  SMLoc S,
1806  MCContext &Ctx) {
1807  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1808  Op->Prefetch.Val = Val;
1809  Op->Barrier.Data = Str.data();
1810  Op->Barrier.Length = Str.size();
1811  Op->StartLoc = S;
1812  Op->EndLoc = S;
1813  return Op;
1814  }
1815 
1816  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1817  StringRef Str,
1818  SMLoc S,
1819  MCContext &Ctx) {
1820  auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1821  Op->PSBHint.Val = Val;
1822  Op->PSBHint.Data = Str.data();
1823  Op->PSBHint.Length = Str.size();
1824  Op->StartLoc = S;
1825  Op->EndLoc = S;
1826  return Op;
1827  }
1828 
1829  static std::unique_ptr<AArch64Operand>
1830  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1831  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1832  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1833  Op->ShiftExtend.Type = ShOp;
1834  Op->ShiftExtend.Amount = Val;
1835  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1836  Op->StartLoc = S;
1837  Op->EndLoc = E;
1838  return Op;
1839  }
1840 };
1841 
1842 } // end anonymous namespace.
1843 
1844 void AArch64Operand::print(raw_ostream &OS) const {
1845  switch (Kind) {
1846  case k_FPImm:
1847  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
1848  if (!getFPImmIsExact())
1849  OS << " (inexact)";
1850  OS << ">";
1851  break;
1852  case k_Barrier: {
1853  StringRef Name = getBarrierName();
1854  if (!Name.empty())
1855  OS << "<barrier " << Name << ">";
1856  else
1857  OS << "<barrier invalid #" << getBarrier() << ">";
1858  break;
1859  }
1860  case k_Immediate:
1861  OS << *getImm();
1862  break;
1863  case k_ShiftedImm: {
1864  unsigned Shift = getShiftedImmShift();
1865  OS << "<shiftedimm ";
1866  OS << *getShiftedImmVal();
1867  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1868  break;
1869  }
1870  case k_CondCode:
1871  OS << "<condcode " << getCondCode() << ">";
1872  break;
1873  case k_VectorList: {
1874  OS << "<vectorlist ";
1875  unsigned Reg = getVectorListStart();
1876  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1877  OS << Reg + i << " ";
1878  OS << ">";
1879  break;
1880  }
1881  case k_VectorIndex:
1882  OS << "<vectorindex " << getVectorIndex() << ">";
1883  break;
1884  case k_SysReg:
1885  OS << "<sysreg: " << getSysReg() << '>';
1886  break;
1887  case k_Token:
1888  OS << "'" << getToken() << "'";
1889  break;
1890  case k_SysCR:
1891  OS << "c" << getSysCR();
1892  break;
1893  case k_Prefetch: {
1894  StringRef Name = getPrefetchName();
1895  if (!Name.empty())
1896  OS << "<prfop " << Name << ">";
1897  else
1898  OS << "<prfop invalid #" << getPrefetch() << ">";
1899  break;
1900  }
1901  case k_PSBHint:
1902  OS << getPSBHintName();
1903  break;
1904  case k_Register:
1905  OS << "<register " << getReg() << ">";
1906  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
1907  break;
1909  case k_ShiftExtend:
1910  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1911  << getShiftExtendAmount();
1912  if (!hasShiftExtendAmount())
1913  OS << "<imp>";
1914  OS << '>';
1915  break;
1916  }
1917 }
1918 
1919 /// @name Auto-generated Match Functions
1920 /// {
1921 
1922 static unsigned MatchRegisterName(StringRef Name);
1923 
1924 /// }
1925 
// Map a Neon vector register name ("v0".."v31", case-insensitive) to the
// corresponding Q register; returns 0 for anything else.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
1962 
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  // {-1, -1} is the private "invalid" sentinel; it is mapped to an empty
  // Optional at the end so callers never see it.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1});
    break;
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
    // SVE suffixes never carry an element count — the vector length is
    // implementation-defined — so only the element width is reported.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);
}
2019 
2020 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2021  return parseVectorKind(Suffix, VectorKind).hasValue();
2022 }
2023 
2024 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2025  return StringSwitch<unsigned>(Name.lower())
2026  .Case("z0", AArch64::Z0)
2027  .Case("z1", AArch64::Z1)
2028  .Case("z2", AArch64::Z2)
2029  .Case("z3", AArch64::Z3)
2030  .Case("z4", AArch64::Z4)
2031  .Case("z5", AArch64::Z5)
2032  .Case("z6", AArch64::Z6)
2033  .Case("z7", AArch64::Z7)
2034  .Case("z8", AArch64::Z8)
2035  .Case("z9", AArch64::Z9)
2036  .Case("z10", AArch64::Z10)
2037  .Case("z11", AArch64::Z11)
2038  .Case("z12", AArch64::Z12)
2039  .Case("z13", AArch64::Z13)
2040  .Case("z14", AArch64::Z14)
2041  .Case("z15", AArch64::Z15)
2042  .Case("z16", AArch64::Z16)
2043  .Case("z17", AArch64::Z17)
2044  .Case("z18", AArch64::Z18)
2045  .Case("z19", AArch64::Z19)
2046  .Case("z20", AArch64::Z20)
2047  .Case("z21", AArch64::Z21)
2048  .Case("z22", AArch64::Z22)
2049  .Case("z23", AArch64::Z23)
2050  .Case("z24", AArch64::Z24)
2051  .Case("z25", AArch64::Z25)
2052  .Case("z26", AArch64::Z26)
2053  .Case("z27", AArch64::Z27)
2054  .Case("z28", AArch64::Z28)
2055  .Case("z29", AArch64::Z29)
2056  .Case("z30", AArch64::Z30)
2057  .Case("z31", AArch64::Z31)
2058  .Default(0);
2059 }
2060 
2061 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2062  return StringSwitch<unsigned>(Name.lower())
2063  .Case("p0", AArch64::P0)
2064  .Case("p1", AArch64::P1)
2065  .Case("p2", AArch64::P2)
2066  .Case("p3", AArch64::P3)
2067  .Case("p4", AArch64::P4)
2068  .Case("p5", AArch64::P5)
2069  .Case("p6", AArch64::P6)
2070  .Case("p7", AArch64::P7)
2071  .Case("p8", AArch64::P8)
2072  .Case("p9", AArch64::P9)
2073  .Case("p10", AArch64::P10)
2074  .Case("p11", AArch64::P11)
2075  .Case("p12", AArch64::P12)
2076  .Case("p13", AArch64::P13)
2077  .Case("p14", AArch64::P14)
2078  .Case("p15", AArch64::P15)
2079  .Default(0);
2080 }
2081 
2082 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2083  SMLoc &EndLoc) {
2084  StartLoc = getLoc();
2085  auto Res = tryParseScalarRegister(RegNo);
2086  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2087  return Res != MatchOperand_Success;
2088 }
2089 
2090 // Matches a register name or register alias previously defined by '.req'
2091 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2092  RegKind Kind) {
2093  unsigned RegNum = 0;
2094  if ((RegNum = matchSVEDataVectorRegName(Name)))
2095  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2096 
2097  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2098  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2099 
2100  if ((RegNum = MatchNeonVectorRegName(Name)))
2101  return Kind == RegKind::NeonVector ? RegNum : 0;
2102 
2103  // The parsed register must be of RegKind Scalar
2104  if ((RegNum = MatchRegisterName(Name)))
2105  return Kind == RegKind::Scalar ? RegNum : 0;
2106 
2107  if (!RegNum) {
2108  // Handle a few common aliases of registers.
2109  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2110  .Case("fp", AArch64::FP)
2111  .Case("lr", AArch64::LR)
2112  .Case("x31", AArch64::XZR)
2113  .Case("w31", AArch64::WZR)
2114  .Default(0))
2115  return Kind == RegKind::Scalar ? RegNum : 0;
2116 
2117  // Check for aliases registered via .req. Canonicalize to lower case.
2118  // That's more consistent since register names are case insensitive, and
2119  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2120  auto Entry = RegisterReqs.find(Name.lower());
2121  if (Entry == RegisterReqs.end())
2122  return 0;
2123 
2124  // set RegNum if the match is the right kind of register
2125  if (Kind == Entry->getValue().first)
2126  RegNum = Entry->getValue().second;
2127  }
2128  return RegNum;
2129 }
2130 
2131 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2132 /// Identifier when called, and if it is a register name the token is eaten and
2133 /// the register is added to the operand list.
2135 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2136  MCAsmParser &Parser = getParser();
2137  const AsmToken &Tok = Parser.getTok();
2138  if (Tok.isNot(AsmToken::Identifier))
2139  return MatchOperand_NoMatch;
2140 
2141  std::string lowerCase = Tok.getString().lower();
2142  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2143  if (Reg == 0)
2144  return MatchOperand_NoMatch;
2145 
2146  RegNum = Reg;
2147  Parser.Lex(); // Eat identifier token.
2148  return MatchOperand_Success;
2149 }
2150 
2151 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2153 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2154  MCAsmParser &Parser = getParser();
2155  SMLoc S = getLoc();
2156 
2157  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2158  Error(S, "Expected cN operand where 0 <= N <= 15");
2159  return MatchOperand_ParseFail;
2160  }
2161 
2162  StringRef Tok = Parser.getTok().getIdentifier();
2163  if (Tok[0] != 'c' && Tok[0] != 'C') {
2164  Error(S, "Expected cN operand where 0 <= N <= 15");
2165  return MatchOperand_ParseFail;
2166  }
2167 
2168  uint32_t CRNum;
2169  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2170  if (BadNum || CRNum > 15) {
2171  Error(S, "Expected cN operand where 0 <= N <= 15");
2172  return MatchOperand_ParseFail;
2173  }
2174 
2175  Parser.Lex(); // Eat identifier token.
2176  Operands.push_back(
2177  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2178  return MatchOperand_Success;
2179 }
2180 
2181 /// tryParsePrefetch - Try to parse a prefetch operand.
2182 template <bool IsSVEPrefetch>
2184 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2185  MCAsmParser &Parser = getParser();
2186  SMLoc S = getLoc();
2187  const AsmToken &Tok = Parser.getTok();
2188 
2189  auto LookupByName = [](StringRef N) {
2190  if (IsSVEPrefetch) {
2191  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2192  return Optional<unsigned>(Res->Encoding);
2193  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2194  return Optional<unsigned>(Res->Encoding);
2195  return Optional<unsigned>();
2196  };
2197 
2198  auto LookupByEncoding = [](unsigned E) {
2199  if (IsSVEPrefetch) {
2200  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2201  return Optional<StringRef>(Res->Name);
2202  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2203  return Optional<StringRef>(Res->Name);
2204  return Optional<StringRef>();
2205  };
2206  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2207 
2208  // Either an identifier for named values or a 5-bit immediate.
2209  // Eat optional hash.
2210  if (parseOptionalToken(AsmToken::Hash) ||
2211  Tok.is(AsmToken::Integer)) {
2212  const MCExpr *ImmVal;
2213  if (getParser().parseExpression(ImmVal))
2214  return MatchOperand_ParseFail;
2215 
2216  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2217  if (!MCE) {
2218  TokError("immediate value expected for prefetch operand");
2219  return MatchOperand_ParseFail;
2220  }
2221  unsigned prfop = MCE->getValue();
2222  if (prfop > MaxVal) {
2223  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2224  "] expected");
2225  return MatchOperand_ParseFail;
2226  }
2227 
2228  auto PRFM = LookupByEncoding(MCE->getValue());
2229  Operands.push_back(AArch64Operand::CreatePrefetch(
2230  prfop, PRFM.getValueOr(""), S, getContext()));
2231  return MatchOperand_Success;
2232  }
2233 
2234  if (Tok.isNot(AsmToken::Identifier)) {
2235  TokError("prefetch hint expected");
2236  return MatchOperand_ParseFail;
2237  }
2238 
2239  auto PRFM = LookupByName(Tok.getString());
2240  if (!PRFM) {
2241  TokError("prefetch hint expected");
2242  return MatchOperand_ParseFail;
2243  }
2244 
2245  Parser.Lex(); // Eat identifier token.
2246  Operands.push_back(AArch64Operand::CreatePrefetch(
2247  *PRFM, Tok.getString(), S, getContext()));
2248  return MatchOperand_Success;
2249 }
2250 
2251 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2253 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2254  MCAsmParser &Parser = getParser();
2255  SMLoc S = getLoc();
2256  const AsmToken &Tok = Parser.getTok();
2257  if (Tok.isNot(AsmToken::Identifier)) {
2258  TokError("invalid operand for instruction");
2259  return MatchOperand_ParseFail;
2260  }
2261 
2262  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2263  if (!PSB) {
2264  TokError("invalid operand for instruction");
2265  return MatchOperand_ParseFail;
2266  }
2267 
2268  Parser.Lex(); // Eat identifier token.
2269  Operands.push_back(AArch64Operand::CreatePSBHint(
2270  PSB->Encoding, Tok.getString(), S, getContext()));
2271  return MatchOperand_Success;
2272 }
2273 
2274 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2275 /// instruction.
// NOTE(review): the return-type line (original line 2276, presumably
// OperandMatchResultTy) is not visible in this rendering.
2277 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2278  MCAsmParser &Parser = getParser();
2279  SMLoc S = getLoc();
2280  const MCExpr *Expr;
2281 
2282  if (Parser.getTok().is(AsmToken::Hash)) {
2283  Parser.Lex(); // Eat hash token.
2284  }
2285 
2286  if (parseSymbolicImmVal(Expr))
2287  return MatchOperand_ParseFail;
2288 
2289  AArch64MCExpr::VariantKind ELFRefKind;
2290  MCSymbolRefExpr::VariantKind DarwinRefKind;
2291  int64_t Addend;
2292  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2293  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2294  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2295  // No modifier was specified at all; this is the syntax for an ELF basic
2296  // ADRP relocation (unfortunately).
2297  Expr =
// NOTE(review): the right-hand side of this assignment (original line 2298)
// is missing from this rendering — presumably it rewraps Expr with the
// page-relative AArch64MCExpr variant; confirm against the full source.
2299  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2300  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2301  Addend != 0) {
2302  Error(S, "gotpage label reference not allowed an addend");
2303  return MatchOperand_ParseFail;
2304  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2305  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2306  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2307  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2308  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2309  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2310  // The operand must be an @page or @gotpage qualified symbolref.
2311  Error(S, "page or gotpage label reference expected");
2312  return MatchOperand_ParseFail;
2313  }
2314  }
2315 
2316  // We have either a label reference possibly with addend or an immediate. The
2317  // addend is a raw value here. The linker will adjust it to only reference the
2318  // page.
2319  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2320  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2321 
2322  return MatchOperand_Success;
2323 }
2324 
2325 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2326 /// instruction.
2328 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2329  SMLoc S = getLoc();
2330  const MCExpr *Expr;
2331 
2332  parseOptionalToken(AsmToken::Hash);
2333  if (getParser().parseExpression(Expr))
2334  return MatchOperand_ParseFail;
2335 
2336  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2337  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2338 
2339  return MatchOperand_Success;
2340 }
2341 
2342 /// tryParseFPImm - A floating point immediate expression operand.
2343 template<bool AddFPZeroAsLiteral>
// NOTE(review): the return-type line (original line 2344, presumably
// OperandMatchResultTy) is not visible in this rendering.
2345 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2346  MCAsmParser &Parser = getParser();
2347  SMLoc S = getLoc();
2348 
2349  bool Hash = parseOptionalToken(AsmToken::Hash);
2350 
2351  // Handle negation, as that still comes through as a separate token.
2352  bool isNegative = parseOptionalToken(AsmToken::Minus);
2353 
2354  const AsmToken &Tok = Parser.getTok();
2355  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
// Without a leading '#', a non-numeric token is simply not an FP immediate;
// with one, it is a hard parse error.
2356  if (!Hash)
2357  return MatchOperand_NoMatch;
2358  TokError("invalid floating point immediate");
2359  return MatchOperand_ParseFail;
2360  }
2361 
2362  // Parse hexadecimal representation.
2363  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
// A hex value is the already-encoded 8-bit FP immediate, so it must fit in
// [0, 255] and cannot carry an explicit minus sign.
2364  if (Tok.getIntVal() > 255 || isNegative) {
2365  TokError("encoded floating point value out of range");
2366  return MatchOperand_ParseFail;
2367  }
2368 
2369  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2370  Operands.push_back(
2371  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2372  } else {
2373  // Parse FP representation.
2374  APFloat RealVal(APFloat::IEEEdouble());
2375  auto Status =
// NOTE(review): the right-hand side (original line 2376) is missing from
// this rendering — presumably APFloat::convertFromString on the token's
// text, whose status is compared against APFloat::opOK below; confirm
// against the full source.
2377  if (isNegative)
2378  RealVal.changeSign();
2379 
2380  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
// +0.0 is emitted as the two literal tokens "#0" and ".0" when requested.
2381  Operands.push_back(
2382  AArch64Operand::CreateToken("#0", false, S, getContext()));
2383  Operands.push_back(
2384  AArch64Operand::CreateToken(".0", false, S, getContext()));
2385  } else
2386  Operands.push_back(AArch64Operand::CreateFPImm(
2387  RealVal, Status == APFloat::opOK, S, getContext()));
2388  }
2389 
2390  Parser.Lex(); // Eat the token.
2391 
2392  return MatchOperand_Success;
2393 }
2394 
2395 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2396 /// a shift suffix, for example '#1, lsl #12'.
2398 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2399  MCAsmParser &Parser = getParser();
2400  SMLoc S = getLoc();
2401 
2402  if (Parser.getTok().is(AsmToken::Hash))
2403  Parser.Lex(); // Eat '#'
2404  else if (Parser.getTok().isNot(AsmToken::Integer))
2405  // Operand should start from # or should be integer, emit error otherwise.
2406  return MatchOperand_NoMatch;
2407 
2408  const MCExpr *Imm;
2409  if (parseSymbolicImmVal(Imm))
2410  return MatchOperand_ParseFail;
2411  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2412  SMLoc E = Parser.getTok().getLoc();
2413  Operands.push_back(
2414  AArch64Operand::CreateImm(Imm, S, E, getContext()));
2415  return MatchOperand_Success;
2416  }
2417 
2418  // Eat ','
2419  Parser.Lex();
2420 
2421  // The optional operand must be "lsl #N" where N is non-negative.
2422  if (!Parser.getTok().is(AsmToken::Identifier) ||
2423  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2424  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2425  return MatchOperand_ParseFail;
2426  }
2427 
2428  // Eat 'lsl'
2429  Parser.Lex();
2430 
2431  parseOptionalToken(AsmToken::Hash);
2432 
2433  if (Parser.getTok().isNot(AsmToken::Integer)) {
2434  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2435  return MatchOperand_ParseFail;
2436  }
2437 
2438  int64_t ShiftAmount = Parser.getTok().getIntVal();
2439 
2440  if (ShiftAmount < 0) {
2441  Error(Parser.getTok().getLoc(), "positive shift amount required");
2442  return MatchOperand_ParseFail;
2443  }
2444  Parser.Lex(); // Eat the number
2445 
2446  // Just in case the optional lsl #0 is used for immediates other than zero.
2447  if (ShiftAmount == 0 && Imm != 0) {
2448  SMLoc E = Parser.getTok().getLoc();
2449  Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2450  return MatchOperand_Success;
2451  }
2452 
2453  SMLoc E = Parser.getTok().getLoc();
2454  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2455  S, E, getContext()));
2456  return MatchOperand_Success;
2457 }
2458 
2459 /// parseCondCodeString - Parse a Condition Code string.
2460 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
// NOTE(review): the StringSwitch head (original line 2461) and its .Default
// (original line 2480) are not visible in this rendering — presumably CC is
// initialized from a StringSwitch<AArch64CC::CondCode> over the lower-cased
// Cond, defaulting to AArch64CC::Invalid; confirm against the full source.
2462  .Case("eq", AArch64CC::EQ)
2463  .Case("ne", AArch64CC::NE)
2464  .Case("cs", AArch64CC::HS)
2465  .Case("hs", AArch64CC::HS)
2466  .Case("cc", AArch64CC::LO)
2467  .Case("lo", AArch64CC::LO)
2468  .Case("mi", AArch64CC::MI)
2469  .Case("pl", AArch64CC::PL)
2470  .Case("vs", AArch64CC::VS)
2471  .Case("vc", AArch64CC::VC)
2472  .Case("hi", AArch64CC::HI)
2473  .Case("ls", AArch64CC::LS)
2474  .Case("ge", AArch64CC::GE)
2475  .Case("lt", AArch64CC::LT)
2476  .Case("gt", AArch64CC::GT)
2477  .Case("le", AArch64CC::LE)
2478  .Case("al", AArch64CC::AL)
2479  .Case("nv", AArch64CC::NV)
2481  return CC;
2482 }
2483 
2484 /// parseCondCode - Parse a Condition Code operand.
2485 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2486  bool invertCondCode) {
2487  MCAsmParser &Parser = getParser();
2488  SMLoc S = getLoc();
2489  const AsmToken &Tok = Parser.getTok();
2490  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2491 
2492  StringRef Cond = Tok.getString();
2493  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2494  if (CC == AArch64CC::Invalid)
2495  return TokError("invalid condition code");
2496  Parser.Lex(); // Eat identifier token.
2497 
2498  if (invertCondCode) {
// AL and NV have no meaningful inverse, so they are rejected here.
2499  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2500  return TokError("condition codes AL and NV are invalid for this instruction")
// NOTE(review): original line 2501 is missing from this rendering —
// presumably it replaces CC with its inverted condition code (e.g. via
// AArch64CC::getInvertedCondCode); confirm against the full source.
2502  }
2503 
2504  Operands.push_back(
2505  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2506  return false;
2507 }
2508 
2509 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2510 /// them if present.
// NOTE(review): the return-type line (original line 2511, presumably
// OperandMatchResultTy) is not visible in this rendering.
2512 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2513  MCAsmParser &Parser = getParser();
2514  const AsmToken &Tok = Parser.getTok();
2515  std::string LowerID = Tok.getString().lower();
// NOTE(review): the declaration of ShOp (original lines 2516-2517) and the
// .Default (original line 2531) are not visible in this rendering —
// presumably ShOp is a AArch64_AM::ShiftExtendType initialized from a
// StringSwitch over LowerID defaulting to InvalidShiftExtend; confirm
// against the full source.
2518  .Case("lsl", AArch64_AM::LSL)
2519  .Case("lsr", AArch64_AM::LSR)
2520  .Case("asr", AArch64_AM::ASR)
2521  .Case("ror", AArch64_AM::ROR)
2522  .Case("msl", AArch64_AM::MSL)
2523  .Case("uxtb", AArch64_AM::UXTB)
2524  .Case("uxth", AArch64_AM::UXTH)
2525  .Case("uxtw", AArch64_AM::UXTW)
2526  .Case("uxtx", AArch64_AM::UXTX)
2527  .Case("sxtb", AArch64_AM::SXTB)
2528  .Case("sxth", AArch64_AM::SXTH)
2529  .Case("sxtw", AArch64_AM::SXTW)
2530  .Case("sxtx", AArch64_AM::SXTX)
2532 
2533  if (ShOp == AArch64_AM::InvalidShiftExtend)
2534  return MatchOperand_NoMatch;
2535 
2536  SMLoc S = Tok.getLoc();
2537  Parser.Lex();
2538 
2539  bool Hash = parseOptionalToken(AsmToken::Hash);
2540 
2541  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2542  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2543  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2544  ShOp == AArch64_AM::MSL) {
2545  // We expect a number here.
2546  TokError("expected #imm after shift specifier");
2547  return MatchOperand_ParseFail;
2548  }
2549 
2550  // "extend" type operations don't need an immediate, #0 is implicit.
2551  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2552  Operands.push_back(
2553  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2554  return MatchOperand_Success;
2555  }
2556 
2557  // Make sure we do actually have a number, identifier or a parenthesized
2558  // expression.
2559  SMLoc E = Parser.getTok().getLoc();
2560  if (!Parser.getTok().is(AsmToken::Integer) &&
2561  !Parser.getTok().is(AsmToken::LParen) &&
2562  !Parser.getTok().is(AsmToken::Identifier)) {
2563  Error(E, "expected integer shift amount");
2564  return MatchOperand_ParseFail;
2565  }
2566 
2567  const MCExpr *ImmVal;
2568  if (getParser().parseExpression(ImmVal))
2569  return MatchOperand_ParseFail;
2570 
// Only constant shift amounts are accepted; symbolic expressions are errors.
2571  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2572  if (!MCE) {
2573  Error(E, "expected constant '#imm' after shift specifier");
2574  return MatchOperand_ParseFail;
2575  }
2576 
2577  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2578  Operands.push_back(AArch64Operand::CreateShiftExtend(
2579  ShOp, MCE->getValue(), true, S, E, getContext()));
2580  return MatchOperand_Success;
2581 }
2582 
2583 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2584  if (FBS[AArch64::HasV8_1aOps])
2585  Str += "ARMv8.1a";
2586  else if (FBS[AArch64::HasV8_2aOps])
2587  Str += "ARMv8.2a";
2588  else
2589  Str += "(unknown)";
2590 }
2591 
2592 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2593  SMLoc S) {
2594  const uint16_t Op2 = Encoding & 7;
2595  const uint16_t Cm = (Encoding & 0x78) >> 3;
2596  const uint16_t Cn = (Encoding & 0x780) >> 7;
2597  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2598 
2599  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2600 
2601  Operands.push_back(
2602  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2603  Operands.push_back(
2604  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2605  Operands.push_back(
2606  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2607  Expr = MCConstantExpr::create(Op2, getContext());
2608  Operands.push_back(
2609  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2610 }
2611 
2612 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2613 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2614 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2615  OperandVector &Operands) {
2616  if (Name.find('.') != StringRef::npos)
2617  return TokError("invalid operand")
2618 
// The alias is rewritten as a "sys" mnemonic plus the decoded operands.
2619  Mnemonic = Name;
2620  Operands.push_back(
2621  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2622 
2623  MCAsmParser &Parser = getParser();
2624  const AsmToken &Tok = Parser.getTok();
2625  StringRef Op = Tok.getString();
2626  SMLoc S = Tok.getLoc();
2627 
2628  if (Mnemonic == "ic") {
2629  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2630  if (!IC)
2631  return TokError("invalid operand for IC instruction");
2632  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2633  std::string Str("IC " + std::string(IC->Name) + " requires ");
// NOTE(review): a line is missing from this rendering at each of the four
// feature-error paths (original lines 2634/2644/2654/2664) — presumably a
// call appending the required architecture name to Str (cf.
// setRequiredFeatureString above); confirm against the full source.
2635  return TokError(Str.c_str());
2636  }
2637  createSysAlias(IC->Encoding, Operands, S);
2638  } else if (Mnemonic == "dc") {
2639  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2640  if (!DC)
2641  return TokError("invalid operand for DC instruction");
2642  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2643  std::string Str("DC " + std::string(DC->Name) + " requires ");
2645  return TokError(Str.c_str());
2646  }
2647  createSysAlias(DC->Encoding, Operands, S);
2648  } else if (Mnemonic == "at") {
2649  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2650  if (!AT)
2651  return TokError("invalid operand for AT instruction");
2652  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2653  std::string Str("AT " + std::string(AT->Name) + " requires ");
2655  return TokError(Str.c_str());
2656  }
2657  createSysAlias(AT->Encoding, Operands, S);
2658  } else if (Mnemonic == "tlbi") {
2659  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2660  if (!TLBI)
2661  return TokError("invalid operand for TLBI instruction");
2662  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2663  std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2665  return TokError(Str.c_str());
2666  }
2667  createSysAlias(TLBI->Encoding, Operands, S);
2668  }
2669 
2670  Parser.Lex(); // Eat operand.
2671 
// Ops whose name contains "all" operate on everything and take no register.
2672  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2673  bool HasRegister = false;
2674 
2675  // Check for the optional register operand.
2676  if (parseOptionalToken(AsmToken::Comma)) {
2677  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2678  return TokError("expected register operand");
2679  HasRegister = true;
2680  }
2681 
2682  if (ExpectRegister && !HasRegister)
2683  return TokError("specified " + Mnemonic + " op requires a register");
2684  else if (!ExpectRegister && HasRegister)
2685  return TokError("specified " + Mnemonic + " op does not use a register");
2686 
2687  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2688  return true;
2689 
2690  return false;
2691 }
2692 
2694 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2695  MCAsmParser &Parser = getParser();
2696  const AsmToken &Tok = Parser.getTok();
2697 
2698  // Can be either a #imm style literal or an option name
2699  if (parseOptionalToken(AsmToken::Hash) ||
2700  Tok.is(AsmToken::Integer)) {
2701  // Immediate operand.
2702  const MCExpr *ImmVal;
2703  SMLoc ExprLoc = getLoc();
2704  if (getParser().parseExpression(ImmVal))
2705  return MatchOperand_ParseFail;
2706  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2707  if (!MCE) {
2708  Error(ExprLoc, "immediate value expected for barrier operand");
2709  return MatchOperand_ParseFail;
2710  }
2711  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2712  Error(ExprLoc, "barrier operand out of range");
2713  return MatchOperand_ParseFail;
2714  }
2715  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2716  Operands.push_back(AArch64Operand::CreateBarrier(
2717  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2718  return MatchOperand_Success;
2719  }
2720 
2721  if (Tok.isNot(AsmToken::Identifier)) {
2722  TokError("invalid operand for instruction");
2723  return MatchOperand_ParseFail;
2724  }
2725 
2726  // The only valid named option for ISB is 'sy'
2727  auto DB = AArch64DB::lookupDBByName(Tok.getString());
2728  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2729  TokError("'sy' or #imm operand expected");
2730  return MatchOperand_ParseFail;
2731  } else if (!DB) {
2732  TokError("invalid barrier option name");
2733  return MatchOperand_ParseFail;
2734  }
2735 
2736  Operands.push_back(AArch64Operand::CreateBarrier(
2737  DB->Encoding, Tok.getString(), getLoc(), getContext()));
2738  Parser.Lex(); // Consume the option
2739 
2740  return MatchOperand_Success;
2741 }
2742 
2744 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2745  MCAsmParser &Parser = getParser();
2746  const AsmToken &Tok = Parser.getTok();
2747 
2748  if (Tok.isNot(AsmToken::Identifier))
2749  return MatchOperand_NoMatch;
2750 
2751  int MRSReg, MSRReg;
2752  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2753  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2754  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2755  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2756  } else
2757  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2758 
2759  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2760  unsigned PStateImm = -1;
2761  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2762  PStateImm = PState->Encoding;
2763 
2764  Operands.push_back(
2765  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2766  PStateImm, getContext()));
2767  Parser.Lex(); // Eat identifier
2768 
2769  return MatchOperand_Success;
2770 }
2771 
2772 /// tryParseNeonVectorRegister - Parse a vector register operand.
2773 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2774  MCAsmParser &Parser = getParser();
2775  if (Parser.getTok().isNot(AsmToken::Identifier))
2776  return true;
2777 
2778  SMLoc S = getLoc();
2779  // Check for a vector register specifier first.
2780  StringRef Kind;
2781  unsigned Reg;
2782  OperandMatchResultTy Res =
2783  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
2784  if (Res != MatchOperand_Success)
2785  return true;
2786 
2787  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
2788  if (!KindRes)
2789  return true;
2790 
2791  unsigned ElementWidth = KindRes->second;
2792  Operands.push_back(
2793  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
2794  S, getLoc(), getContext()));
2795 
2796  // If there was an explicit qualifier, that goes on as a literal text
2797  // operand.
2798  if (!Kind.empty())
2799  Operands.push_back(
2800  AArch64Operand::CreateToken(Kind, false, S, getContext()));
2801 
2802  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
2803 }
2804 
2806 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
2807  SMLoc SIdx = getLoc();
2808  if (parseOptionalToken(AsmToken::LBrac)) {
2809  const MCExpr *ImmVal;
2810  if (getParser().parseExpression(ImmVal))
2811  return MatchOperand_NoMatch;
2812  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2813  if (!MCE) {
2814  TokError("immediate value expected for vector index");
2815  return MatchOperand_ParseFail;;
2816  }
2817 
2818  SMLoc E = getLoc();
2819 
2820  if (parseToken(AsmToken::RBrac, "']' expected"))
2821  return MatchOperand_ParseFail;;
2822 
2823  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2824  E, getContext()));
2825  return MatchOperand_Success;
2826  }
2827 
2828  return MatchOperand_NoMatch;
2829 }
2830 
2831 // tryParseVectorRegister - Try to parse a vector register name with
2832 // optional kind specifier. If it is a register specifier, eat the token
2833 // and return it.
2835 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
2836  RegKind MatchKind) {
2837  MCAsmParser &Parser = getParser();
2838  const AsmToken &Tok = Parser.getTok();
2839 
2840  if (Tok.isNot(AsmToken::Identifier))
2841  return MatchOperand_NoMatch;
2842 
2843  StringRef Name = Tok.getString();
2844  // If there is a kind specifier, it's separated from the register name by
2845  // a '.'.
2846  size_t Start = 0, Next = Name.find('.');
2847  StringRef Head = Name.slice(Start, Next);
2848  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
2849 
2850  if (RegNum) {
2851  if (Next != StringRef::npos) {
2852  Kind = Name.slice(Next, StringRef::npos);
2853  if (!isValidVectorKind(Kind, MatchKind)) {
2854  TokError("invalid vector kind qualifier");
2855  return MatchOperand_ParseFail;
2856  }
2857  }
2858  Parser.Lex(); // Eat the register token.
2859 
2860  Reg = RegNum;
2861  return MatchOperand_Success;
2862  }
2863 
2864  return MatchOperand_NoMatch;
2865 }
2866 
2867 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  // An empty Kind maps to an "any size" predicate; reject unknown suffixes.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
2919 
2920 /// parseRegister - Parse a register operand.
2921 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2922  // Try for a Neon vector register.
2923  if (!tryParseNeonVectorRegister(Operands))
2924  return false;
2925 
2926  // Otherwise try for a scalar register.
2927  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
2928  return false;
2929 
2930  return true;
2931 }
2932 
/// parseSymbolicImmVal - Parse an immediate expression, optionally prefixed
/// by an ELF relocation specifier of the form ":spec:" (e.g. ":lo12:sym").
/// On success ImmVal holds the (possibly AArch64MCExpr-wrapped) expression
/// and false is returned; true is returned on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Relocation specifiers are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    // NOTE(review): RefKind's declaration (presumably an
    // AArch64MCExpr::VariantKind initialized to VK_INVALID) is not visible in
    // this excerpt, and the StringSwitch chain below appears to be missing a
    // few .Case lines and its .Default terminator — confirm against upstream.
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the plain expression so the relocation specifier survives into
  // fixup/relocation emission.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3002 
/// tryParseVectorList - Parse a '{ ... }' register list of 1-4 vector
/// registers of kind \p VectorKind, given either as a comma-separated
/// sequence of consecutive registers or as a "first - last" range.
/// If \p ExpectMatch is true, a non-register inside the braces is a hard
/// error rather than a NoMatch.
template <RegKind VectorKind>
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: "{ v0.8b - v3.8b }".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Distance from first to last, allowing wraparound past register 31.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{ v0.8b, v1.8b, ... }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3123 
3124 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3125 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3126  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3127  if (ParseRes != MatchOperand_Success)
3128  return true;
3129 
3130  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3131 }
3132 
/// tryParseGPR64sp0Operand - Parse a 64-bit GPR/SP register which may be
/// followed by an optional ", #0" (e.g. "sp, #0" in exclusive-load syntax).
/// Any index other than an absent one or literal 0 is rejected.
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No comma: plain register, no index to validate.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // The '#' before the immediate is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getParser().getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  // Only the constant 0 is accepted as an index.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
3166 
/// tryParseGPROperand - Parse a scalar GPR operand, optionally followed
/// (when \p ParseShiftExtend is true) by a ", <shift|extend> #amount"
/// decoration which is folded into the register operand.
template <bool ParseShiftExtend>
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  // NOTE(review): ExtOpnd's declaration (a temporary operand vector for the
  // parsed shift/extend) is not visible in this excerpt — confirm upstream.
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into a single register operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(RegNum, RegKind::Scalar,
                     StartLoc, Ext->getEndLoc(), getContext(),
                     Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
                     Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
3201 
3202 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3203  MCAsmParser &Parser = getParser();
3204 
3205  // Some SVE instructions have a decoration after the immediate, i.e.
3206  // "mul vl". We parse them here and add tokens, which must be present in the
3207  // asm string in the tablegen instruction.
3208  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3209  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3210  if (!Parser.getTok().getString().equals_lower("mul") ||
3211  !(NextIsVL || NextIsHash))
3212  return true;
3213 
3214  Operands.push_back(
3215  AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3216  Parser.Lex(); // Eat the "mul"
3217 
3218  if (NextIsVL) {
3219  Operands.push_back(
3220  AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3221  Parser.Lex(); // Eat the "vl"
3222  return false;
3223  }
3224 
3225  if (NextIsHash) {
3226  Parser.Lex(); // Eat the #
3227  SMLoc S = getLoc();
3228 
3229  // Parse immediate operand.
3230  const MCExpr *ImmVal;
3231  if (!Parser.parseExpression(ImmVal))
3232  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3233  Operands.push_back(AArch64Operand::CreateImm(
3234  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3235  getContext()));
3236  return MatchOperand_Success;
3237  }
3238  }
3239 
3240  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3241 }
3242 
3243 /// parseOperand - Parse a arm instruction operand. For now this parses the
3244 /// operand regardless of the mnemonic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First give tablegen-generated custom operand parsers a chance.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocated) immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // NOTE(review): this converts an OperandMatchResultTy to the bool return
    // value; it relies on ParseFail converting to true — confirm intended.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Push "#0" and ".0" as two raw tokens, matching the asm string.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    // NOTE(review): S is never assigned on this path (it stays a
    // default-constructed SMLoc) — confirm that is intentional.
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize to a 16-bit chunk plus a multiple-of-16 left shift.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3414 
3415 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3416 /// operands.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize legacy "b<cc>" spellings to the "b.<cc>" form before
  // splitting the mnemonic on '.' below.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      return true;
    }

    // N counts operand position (1-based) so the condition-code flags above
    // can be applied at the correct operand slot.
    unsigned N = 2;
    while (parseOptionalToken(AsmToken::Comma)) {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      // + An RBrac will end an address for load/store/prefetch
      // + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    }
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3552 
3553 // FIXME: This entire function is a giant hack to provide us with decent
3554 // operand range validation/diagnostics until TableGen/MC can be extended
3555 // to support autogeneration of this kind of validation.
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback forms: operand 0 is the writeback result, so Rt/Rt2/Rn
    // start at operand 1.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // NOTE(review): control flow appears to fall into the next case group
    // here; a terminating statement (e.g. 'break') seems to be missing from
    // this excerpt — confirm against upstream.
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback pair loads: only Rt == Rt2 is unpredictable.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // FP/SIMD pair loads with writeback: the base is a GPR, so only the
    // Rt == Rt2 hazard applies.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive stores: the status register Rs must not overlap Rt or Rn
    // (except when Rn is SP).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  }


  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
3763 
3764 static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
3765  unsigned VariantID = 0);
3766 
3767 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
3768  OperandVector &Operands) {
3769  switch (ErrCode) {
3770  case Match_InvalidTiedOperand:
3771  return Error(Loc, "operand must match destination register");
3772  case Match_MissingFeature:
3773  return Error(Loc,
3774  "instruction requires a CPU feature not currently enabled");
3775  case Match_InvalidOperand:
3776  return Error(Loc, "invalid operand for instruction");
3777  case Match_InvalidSuffix:
3778  return Error(Loc, "invalid type suffix for instruction");
3779  case Match_InvalidCondCode:
3780  return Error(Loc, "expected AArch64 condition code");
3781  case Match_AddSubRegExtendSmall:
3782  return Error(Loc,
3783  "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3784  case Match_AddSubRegExtendLarge:
3785  return Error(Loc,
3786  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3787  case Match_AddSubSecondSource:
3788  return Error(Loc,
3789  "expected compatible register, symbol or integer in range [0, 4095]");
3790  case Match_LogicalSecondSource:
3791  return Error(Loc, "expected compatible register or logical immediate");
3792  case Match_InvalidMovImm32Shift:
3793  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3794  case Match_InvalidMovImm64Shift:
3795  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3796  case Match_AddSubRegShift32:
3797  return Error(Loc,
3798  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3799  case Match_AddSubRegShift64:
3800  return Error(Loc,
3801  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3802  case Match_InvalidFPImm:
3803  return Error(Loc,
3804  "expected compatible register or floating-point constant");
3805  case Match_InvalidMemoryIndexedSImm6:
3806  return Error(Loc, "index must be an integer in range [-32, 31].");
3807  case Match_InvalidMemoryIndexedSImm5:
3808  return Error(Loc, "index must be an integer in range [-16, 15].");
3809  case Match_InvalidMemoryIndexed1SImm4:
3810  return Error(Loc, "index must be an integer in range [-8, 7].");
3811  case Match_InvalidMemoryIndexed2SImm4:
3812  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
3813  case Match_InvalidMemoryIndexed3SImm4:
3814  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
3815  case Match_InvalidMemoryIndexed4SImm4:
3816  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
3817  case Match_InvalidMemoryIndexed16SImm4:
3818  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
3819  case Match_InvalidMemoryIndexed1SImm6:
3820  return Error(Loc, "index must be an integer in range [-32, 31].");
3821  case Match_InvalidMemoryIndexedSImm9:
3822  return Error(Loc, "index must be an integer in range [-256, 255].");
3823  case Match_InvalidMemoryIndexed8SImm10:
3824  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
3825  case Match_InvalidMemoryIndexed4SImm7:
3826  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3827  case Match_InvalidMemoryIndexed8SImm7:
3828  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3829  case Match_InvalidMemoryIndexed16SImm7:
3830  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3831  case Match_InvalidMemoryIndexed8UImm5:
3832  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
3833  case Match_InvalidMemoryIndexed4UImm5:
3834  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
3835  case Match_InvalidMemoryIndexed2UImm5:
3836  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
3837  case Match_InvalidMemoryIndexed8UImm6:
3838  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
3839  case Match_InvalidMemoryIndexed4UImm6:
3840  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
3841  case Match_InvalidMemoryIndexed2UImm6:
3842  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
3843  case Match_InvalidMemoryIndexed1UImm6:
3844  return Error(Loc, "index must be in range [0, 63].");
3845  case Match_InvalidMemoryWExtend8:
3846  return Error(Loc,
3847  "expected 'uxtw' or 'sxtw' with optional shift of #0");
3848  case Match_InvalidMemoryWExtend16:
3849  return Error(Loc,
3850  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3851  case Match_InvalidMemoryWExtend32:
3852  return Error(Loc,
3853  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3854  case Match_InvalidMemoryWExtend64:
3855  return Error(Loc,
3856  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3857  case Match_InvalidMemoryWExtend128:
3858  return Error(Loc,
3859  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3860  case Match_InvalidMemoryXExtend8:
3861  return Error(Loc,
3862  "expected 'lsl' or 'sxtx' with optional shift of #0");
3863  case Match_InvalidMemoryXExtend16:
3864  return Error(Loc,
3865  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3866  case Match_InvalidMemoryXExtend32:
3867  return Error(Loc,
3868  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3869  case Match_InvalidMemoryXExtend64:
3870  return Error(Loc,
3871  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3872  case Match_InvalidMemoryXExtend128:
3873  return Error(Loc,
3874  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3875  case Match_InvalidMemoryIndexed1:
3876  return Error(Loc, "index must be an integer in range [0, 4095].");
3877  case Match_InvalidMemoryIndexed2:
3878  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3879  case Match_InvalidMemoryIndexed4:
3880  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3881  case Match_InvalidMemoryIndexed8:
3882  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3883  case Match_InvalidMemoryIndexed16:
3884  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3885  case Match_InvalidImm0_1:
3886  return Error(Loc, "immediate must be an integer in range [0, 1].");
3887  case Match_InvalidImm0_7:
3888  return Error(Loc, "immediate must be an integer in range [0, 7].");
3889  case Match_InvalidImm0_15:
3890  return Error(Loc, "immediate must be an integer in range [0, 15].");
3891  case Match_InvalidImm0_31:
3892  return Error(Loc, "immediate must be an integer in range [0, 31].");
3893  case Match_InvalidImm0_63:
3894  return Error(Loc, "immediate must be an integer in range [0, 63].");
3895  case Match_InvalidImm0_127:
3896  return Error(Loc, "immediate must be an integer in range [0, 127].");
3897  case Match_InvalidImm0_255:
3898  return Error(Loc, "immediate must be an integer in range [0, 255].");
3899  case Match_InvalidImm0_65535:
3900  return Error(Loc, "immediate must be an integer in range [0, 65535].");
3901  case Match_InvalidImm1_8:
3902  return Error(Loc, "immediate must be an integer in range [1, 8].");
3903  case Match_InvalidImm1_16:
3904  return Error(Loc, "immediate must be an integer in range [1, 16].");
3905  case Match_InvalidImm1_32:
3906  return Error(Loc, "immediate must be an integer in range [1, 32].");
3907  case Match_InvalidImm1_64:
3908  return Error(Loc, "immediate must be an integer in range [1, 64].");
3909  case Match_InvalidSVEAddSubImm8:
3910  return Error(Loc, "immediate must be an integer in range [0, 255]"
3911  " with a shift amount of 0");
3912  case Match_InvalidSVEAddSubImm16:
3913  case Match_InvalidSVEAddSubImm32:
3914  case Match_InvalidSVEAddSubImm64:
3915  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
3916  "multiple of 256 in range [256, 65280]");
3917  case Match_InvalidSVECpyImm8:
3918  return Error(Loc, "immediate must be an integer in range [-128, 255]"
3919  " with a shift amount of 0");
3920  case Match_InvalidSVECpyImm16:
3921  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
3922  "multiple of 256 in range [-32768, 65280]");
3923  case Match_InvalidSVECpyImm32:
3924  case Match_InvalidSVECpyImm64:
3925  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
3926  "multiple of 256 in range [-32768, 32512]");
3927  case Match_InvalidIndexRange1_1:
3928  return Error(Loc, "expected lane specifier '[1]'");
3929  case Match_InvalidIndexRange0_15:
3930  return Error(Loc, "vector lane must be an integer in range [0, 15].");
3931  case Match_InvalidIndexRange0_7:
3932  return Error(Loc, "vector lane must be an integer in range [0, 7].");
3933  case Match_InvalidIndexRange0_3:
3934  return Error(Loc, "vector lane must be an integer in range [0, 3].");
3935  case Match_InvalidIndexRange0_1:
3936  return Error(Loc, "vector lane must be an integer in range [0, 1].");
3937  case Match_InvalidSVEIndexRange0_63:
3938  return Error(Loc, "vector lane must be an integer in range [0, 63].");
3939  case Match_InvalidSVEIndexRange0_31:
3940  return Error(Loc, "vector lane must be an integer in range [0, 31].");
3941  case Match_InvalidSVEIndexRange0_15:
3942  return Error(Loc, "vector lane must be an integer in range [0, 15].");
3943  case Match_InvalidSVEIndexRange0_7:
3944  return Error(Loc, "vector lane must be an integer in range [0, 7].");
3945  case Match_InvalidSVEIndexRange0_3:
3946  return Error(Loc, "vector lane must be an integer in range [0, 3].");
3947  case Match_InvalidLabel:
3948  return Error(Loc, "expected label or encodable integer pc offset");
3949  case Match_MRS:
3950  return Error(Loc, "expected readable system register");
3951  case Match_MSR:
3952  return Error(Loc, "expected writable system register or pstate");
3953  case Match_InvalidComplexRotationEven:
3954  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
3955  case Match_InvalidComplexRotationOdd:
3956  return Error(Loc, "complex rotation must be 90 or 270.");
3957  case Match_MnemonicFail: {
3958  std::string Suggestion = AArch64MnemonicSpellCheck(
3959  ((AArch64Operand &)*Operands[0]).getToken(),
3960  ComputeAvailableFeatures(STI->getFeatureBits()));
3961  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
3962  }
3963  case Match_InvalidGPR64shifted8:
3964  return Error(Loc, "register must be x0..x30 or xzr, without shift");
3965  case Match_InvalidGPR64shifted16:
3966  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
3967  case Match_InvalidGPR64shifted32:
3968  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
3969  case Match_InvalidGPR64shifted64:
3970  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
3971  case Match_InvalidGPR64NoXZRshifted8:
3972  return Error(Loc, "register must be x0..x30 without shift");
3973  case Match_InvalidGPR64NoXZRshifted16:
3974  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
3975  case Match_InvalidGPR64NoXZRshifted32:
3976  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
3977  case Match_InvalidGPR64NoXZRshifted64:
3978  return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
3979  case Match_InvalidZPR32UXTW8:
3980  case Match_InvalidZPR32SXTW8:
3981  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
3982  case Match_InvalidZPR32UXTW16:
3983  case Match_InvalidZPR32SXTW16:
3984  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
3985  case Match_InvalidZPR32UXTW32:
3986  case Match_InvalidZPR32SXTW32:
3987  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
3988  case Match_InvalidZPR32UXTW64:
3989  case Match_InvalidZPR32SXTW64:
3990  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
3991  case Match_InvalidZPR64UXTW8:
3992  case Match_InvalidZPR64SXTW8:
3993  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
3994  case Match_InvalidZPR64UXTW16:
3995  case Match_InvalidZPR64SXTW16:
3996  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
3997  case Match_InvalidZPR64UXTW32:
3998  case Match_InvalidZPR64SXTW32:
3999  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4000  case Match_InvalidZPR64UXTW64:
4001  case Match_InvalidZPR64SXTW64:
4002  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4003  case Match_InvalidZPR64LSL8:
4004  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4005  case Match_InvalidZPR64LSL16:
4006  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4007  case Match_InvalidZPR64LSL32:
4008  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4009  case Match_InvalidZPR64LSL64:
4010  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4011  case Match_InvalidZPR0:
4012  return Error(Loc, "expected register without element width sufix");
4013  case Match_InvalidZPR8:
4014  case Match_InvalidZPR16:
4015  case Match_InvalidZPR32:
4016  case Match_InvalidZPR64:
4017  case Match_InvalidZPR128:
4018  return Error(Loc, "invalid element width");
4019  case Match_InvalidSVEPattern:
4020  return Error(Loc, "invalid predicate pattern");
4021  case Match_InvalidSVEPredicateAnyReg:
4022  case Match_InvalidSVEPredicateBReg:
4023  case Match_InvalidSVEPredicateHReg:
4024  case Match_InvalidSVEPredicateSReg:
4025  case Match_InvalidSVEPredicateDReg:
4026  return Error(Loc, "invalid predicate register.");
4027  case Match_InvalidSVEPredicate3bAnyReg:
4028  case Match_InvalidSVEPredicate3bBReg:
4029  case Match_InvalidSVEPredicate3bHReg:
4030  case Match_InvalidSVEPredicate3bSReg:
4031  case Match_InvalidSVEPredicate3bDReg:
4032  return Error(Loc, "restricted predicate has range [0, 7].");
4033  case Match_InvalidSVEExactFPImmOperandHalfOne:
4034  return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4035  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4036  return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4037  case Match_InvalidSVEExactFPImmOperandZeroOne:
4038  return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4039  default:
4040  llvm_unreachable("unexpected error code!");
4041  }
4042 }
4043 
4044 static const char *getSubtargetFeatureName(uint64_t Val);
4045 
4046 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4047  OperandVector &Operands,
4048  MCStreamer &Out,
4049  uint64_t &ErrorInfo,
4050  bool MatchingInlineAsm) {
4051  assert(!Operands.empty() && "Unexpect empty operand list!");
4052  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4053  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4054 
4055  StringRef Tok = Op.getToken();
4056  unsigned NumOperands = Operands.size();
4057 
4058  if (NumOperands == 4 && Tok == "lsl") {
4059  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4060  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4061  if (Op2.isScalarReg() && Op3.isImm()) {
4062  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4063  if (Op3CE) {
4064  uint64_t Op3Val = Op3CE->getValue();
4065  uint64_t NewOp3Val = 0;
4066  uint64_t NewOp4Val = 0;
4067  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4068  Op2.getReg())) {
4069  NewOp3Val = (32 - Op3Val) & 0x1f;
4070  NewOp4Val = 31 - Op3Val;
4071  } else {
4072  NewOp3Val = (64 - Op3Val) & 0x3f;
4073  NewOp4Val = 63 - Op3Val;
4074  }
4075 
4076  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4077  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4078 
4079  Operands[0] = AArch64Operand::CreateToken(
4080  "ubfm", false, Op.getStartLoc(), getContext());
4081  Operands.push_back(AArch64Operand::CreateImm(
4082  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4083  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4084  Op3.getEndLoc(), getContext());
4085  }
4086  }
4087  } else if (NumOperands == 4 && Tok == "bfc") {
4088  // FIXME: Horrible hack to handle BFC->BFM alias.
4089  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4090  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4091  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4092 
4093  if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4094  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4095  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4096 
4097  if (LSBCE && WidthCE) {
4098  uint64_t LSB = LSBCE->getValue();
4099  uint64_t Width = WidthCE->getValue();
4100 
4101  uint64_t RegWidth = 0;
4102  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4103  Op1.getReg()))
4104  RegWidth = 64;
4105  else
4106  RegWidth = 32;
4107 
4108  if (LSB >= RegWidth)
4109  return Error(LSBOp.getStartLoc(),
4110  "expected integer in range [0, 31]");
4111  if (Width < 1 || Width > RegWidth)
4112  return Error(WidthOp.getStartLoc(),
4113  "expected integer in range [1, 32]");
4114 
4115  uint64_t ImmR = 0;
4116  if (RegWidth == 32)
4117  ImmR = (32 - LSB) & 0x1f;
4118  else
4119  ImmR = (64 - LSB) & 0x3f;
4120 
4121  uint64_t ImmS = Width - 1;
4122 
4123  if (ImmR != 0 && ImmS >= ImmR)
4124  return Error(WidthOp.getStartLoc(),
4125  "requested insert overflows register");
4126 
4127  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4128  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4129  Operands[0] = AArch64Operand::CreateToken(
4130  "bfm", false, Op.getStartLoc(), getContext());
4131  Operands[2] = AArch64Operand::CreateReg(
4132  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4133  SMLoc(), SMLoc(), getContext());
4134  Operands[3] = AArch64Operand::CreateImm(
4135  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4136  Operands.emplace_back(
4137  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4138  WidthOp.getEndLoc(), getContext()));
4139  }
4140  }
4141  } else if (NumOperands == 5) {
4142  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4143  // UBFIZ -> UBFM aliases.
4144  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4145  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4146  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4147  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4148 
4149  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4150  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4151  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4152 
4153  if (Op3CE && Op4CE) {
4154  uint64_t Op3Val = Op3CE->getValue();
4155  uint64_t Op4Val = Op4CE->getValue();
4156 
4157  uint64_t RegWidth = 0;
4158  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4159  Op1.getReg()))
4160  RegWidth = 64;
4161  else
4162  RegWidth = 32;
4163 
4164  if (Op3Val >= RegWidth)
4165  return Error(Op3.getStartLoc(),
4166  "expected integer in range [0, 31]");
4167  if (Op4Val < 1 || Op4Val > RegWidth)
4168  return Error(Op4.getStartLoc(),
4169  "expected integer in range [1, 32]");
4170 
4171  uint64_t NewOp3Val = 0;
4172  if (RegWidth == 32)
4173  NewOp3Val = (32 - Op3Val) & 0x1f;
4174  else
4175  NewOp3Val = (64 - Op3Val) & 0x3f;
4176 
4177  uint64_t NewOp4Val = Op4Val - 1;
4178 
4179  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4180  return Error(Op4.getStartLoc(),
4181  "requested insert overflows register");
4182 
4183  const MCExpr *NewOp3 =
4184  MCConstantExpr::create(NewOp3Val, getContext());
4185  const MCExpr *NewOp4 =
4186  MCConstantExpr::create(NewOp4Val, getContext());
4187  Operands[3] = AArch64Operand::CreateImm(
4188  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4189  Operands[4] = AArch64Operand::CreateImm(
4190  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4191  if (Tok == "bfi")
4192  Operands[0] = AArch64Operand::CreateToken(
4193  "bfm", false, Op.getStartLoc(), getContext());
4194  else if (Tok == "sbfiz")
4195  Operands[0] = AArch64Operand::CreateToken(
4196  "sbfm", false, Op.getStartLoc(), getContext());
4197  else if (Tok == "ubfiz")
4198  Operands[0] = AArch64Operand::CreateToken(
4199  "ubfm", false, Op.getStartLoc(), getContext());
4200  else
4201  llvm_unreachable("No valid mnemonic for alias?");
4202  }
4203  }
4204 
4205  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4206  // UBFX -> UBFM aliases.
4207  } else if (NumOperands == 5 &&
4208  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4209  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4210  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4211  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4212 
4213  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4214  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4215  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4216 
4217  if (Op3CE && Op4CE) {
4218  uint64_t Op3Val = Op3CE->getValue();
4219  uint64_t Op4Val = Op4CE->getValue();
4220 
4221  uint64_t RegWidth = 0;
4222  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4223  Op1.getReg()))
4224  RegWidth = 64;
4225  else
4226  RegWidth = 32;
4227 
4228  if (Op3Val >= RegWidth)
4229  return Error(Op3.getStartLoc(),
4230  "expected integer in range [0, 31]");
4231  if (Op4Val < 1 || Op4Val > RegWidth)
4232  return Error(Op4.getStartLoc(),
4233  "expected integer in range [1, 32]");
4234 
4235  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4236 
4237  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4238  return Error(Op4.getStartLoc(),
4239  "requested extract overflows register");
4240 
4241  const MCExpr *NewOp4 =
4242  MCConstantExpr::create(NewOp4Val, getContext());
4243  Operands[4] = AArch64Operand::CreateImm(
4244  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4245  if (Tok == "bfxil")
4246  Operands[0] = AArch64Operand::CreateToken(
4247  "bfm", false, Op.getStartLoc(), getContext());
4248  else if (Tok == "sbfx")
4249  Operands[0] = AArch64Operand::CreateToken(
4250  "sbfm", false, Op.getStartLoc(), getContext());
4251  else if (Tok == "ubfx")
4252  Operands[0] = AArch64Operand::CreateToken(
4253  "ubfm", false, Op.getStartLoc(), getContext());
4254  else
4255  llvm_unreachable("No valid mnemonic for alias?");
4256  }
4257  }
4258  }
4259  }
4260 
4261  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4262  // instruction for FP registers correctly in some rare circumstances. Convert
4263  // it to a safe instruction and warn (because silently changing someone's
4264  // assembly is rude).
4265  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4266  NumOperands == 4 && Tok == "movi") {
4267  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4268  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4269  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4270  if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4271  (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4272  StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4273  if (Suffix.lower() == ".2d" &&
4274  cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4275  Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4276  " correctly on this CPU, converting to equivalent movi.16b");
4277  // Switch the suffix to .16b.
4278  unsigned Idx = Op1.isToken() ? 1 : 2;
4279  Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4280  getContext());
4281  }
4282  }
4283  }
4284 
4285  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4286  // InstAlias can't quite handle this since the reg classes aren't
4287  // subclasses.
4288  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4289  // The source register can be Wn here, but the matcher expects a
4290  // GPR64. Twiddle it here if necessary.
4291  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4292  if (Op.isScalarReg()) {
4293  unsigned Reg = getXRegFromWReg(Op.getReg());
4294  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4295  Op.getStartLoc(), Op.getEndLoc(),
4296  getContext());
4297  }
4298  }
4299  // FIXME: Likewise for sxt[bh] with a Xd dst operand
4300  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4301  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4302  if (Op.isScalarReg() &&
4303  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4304  Op.getReg())) {
4305  // The source register can be Wn here, but the matcher expects a
4306  // GPR64. Twiddle it here if necessary.
4307  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4308  if (Op.isScalarReg()) {
4309  unsigned Reg = getXRegFromWReg(Op.getReg());
4310  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4311  Op.getStartLoc(),
4312  Op.getEndLoc(), getContext());
4313  }
4314  }
4315  }
4316  // FIXME: Likewise for uxt[bh] with a Xd dst operand
4317  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4318  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4319  if (Op.isScalarReg() &&
4320  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4321  Op.getReg())) {
4322  // The source register can be Wn here, but the matcher expects a
4323  // GPR32. Twiddle it here if necessary.
4324  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4325  if (Op.isScalarReg()) {
4326  unsigned Reg = getWRegFromXReg(Op.getReg());
4327  Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4328  Op.getStartLoc(),
4329  Op.getEndLoc(), getContext());
4330  }
4331  }
4332  }
4333 
4334  MCInst Inst;
4335  // First try to match against the secondary set of tables containing the
4336  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4337  unsigned MatchResult =
4338  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4339 
4340  // If that fails, try against the alternate table containing long-form NEON:
4341  // "fadd v0.2s, v1.2s, v2.2s"
4342  if (MatchResult != Match_Success) {
4343  // But first, save the short-form match result: we can use it in case the
4344  // long-form match also fails.
4345  auto ShortFormNEONErrorInfo = ErrorInfo;
4346  auto ShortFormNEONMatchResult = MatchResult;
4347 
4348  MatchResult =
4349  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4350 
4351  // Now, both matches failed, and the long-form match failed on the mnemonic
4352  // suffix token operand. The short-form match failure is probably more
4353  // relevant: use it instead.
4354  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4355  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4356  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4357  MatchResult = ShortFormNEONMatchResult;
4358  ErrorInfo = ShortFormNEONErrorInfo;
4359  }
4360  }
4361 
4362  switch (MatchResult) {
4363  case Match_Success: {
4364  // Perform range checking and other semantic validations
4365  SmallVector<SMLoc, 8> OperandLocs;
4366  NumOperands = Operands.size();
4367  for (unsigned i = 1; i < NumOperands; ++i)
4368  OperandLocs.push_back(Operands[i]->getStartLoc());
4369  if (validateInstruction(Inst, OperandLocs))
4370  return true;
4371 
4372  Inst.setLoc(IDLoc);
4373  Out.EmitInstruction(Inst, getSTI());
4374  return false;
4375  }
4376  case Match_MissingFeature: {
4377  assert(ErrorInfo && "Unknown missing feature!");
4378  // Special case the error message for the very common case where only
4379  // a single subtarget feature is missing (neon, e.g.).
4380  std::string Msg = "instruction requires:";
4381  uint64_t Mask = 1;
4382  for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4383  if (ErrorInfo & Mask) {
4384  Msg += " ";
4385  Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4386  }
4387  Mask <<= 1;
4388  }
4389  return Error(IDLoc, Msg);
4390  }
4391  case Match_MnemonicFail:
4392  return showMatchError(IDLoc, MatchResult, Operands);
4393  case Match_InvalidOperand: {
4394  SMLoc ErrorLoc = IDLoc;
4395 
4396  if (ErrorInfo != ~0ULL) {
4397  if (ErrorInfo >= Operands.size())
4398  return Error(IDLoc, "too few operands for instruction",
4399  SMRange(IDLoc, getTok().getLoc()));
4400 
4401  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4402  if (ErrorLoc == SMLoc())
4403  ErrorLoc = IDLoc;
4404  }
4405  // If the match failed on a suffix token operand, tweak the diagnostic
4406  // accordingly.
4407  if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4408  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4409  MatchResult = Match_InvalidSuffix;
4410 
4411  return showMatchError(ErrorLoc, MatchResult, Operands);
4412  }
4413  case Match_InvalidTiedOperand:
4414  case Match_InvalidMemoryIndexed1:
4415  case Match_InvalidMemoryIndexed2:
4416  case Match_InvalidMemoryIndexed4:
4417  case Match_InvalidMemoryIndexed8:
4418  case Match_InvalidMemoryIndexed16:
4419  case Match_InvalidCondCode:
4420  case Match_AddSubRegExtendSmall:
4421  case Match_AddSubRegExtendLarge:
4422  case Match_AddSubSecondSource:
4423  case Match_LogicalSecondSource:
4424  case Match_AddSubRegShift32:
4425  case Match_AddSubRegShift64:
4426  case Match_InvalidMovImm32Shift:
4427  case Match_InvalidMovImm64Shift:
4428  case Match_InvalidFPImm:
4429  case Match_InvalidMemoryWExtend8:
4430  case Match_InvalidMemoryWExtend16:
4431  case Match_InvalidMemoryWExtend32:
4432  case Match_InvalidMemoryWExtend64:
4433  case Match_InvalidMemoryWExtend128:
4434  case Match_InvalidMemoryXExtend8:
4435  case Match_InvalidMemoryXExtend16:
4436  case Match_InvalidMemoryXExtend32:
4437  case Match_InvalidMemoryXExtend64:
4438  case Match_InvalidMemoryXExtend128:
4439  case Match_InvalidMemoryIndexed1SImm4:
4440  case Match_InvalidMemoryIndexed2SImm4:
4441  case Match_InvalidMemoryIndexed3SImm4:
4442  case Match_InvalidMemoryIndexed4SImm4:
4443  case Match_InvalidMemoryIndexed1SImm6:
4444  case Match_InvalidMemoryIndexed16SImm4:
4445  case Match_InvalidMemoryIndexed4SImm7:
4446  case Match_InvalidMemoryIndexed8SImm7:
4447  case Match_InvalidMemoryIndexed16SImm7:
4448  case Match_InvalidMemoryIndexed8UImm5:
4449  case Match_InvalidMemoryIndexed4UImm5:
4450  case Match_InvalidMemoryIndexed2UImm5:
4451  case Match_InvalidMemoryIndexed1UImm6:
4452  case Match_InvalidMemoryIndexed2UImm6:
4453  case Match_InvalidMemoryIndexed4UImm6:
4454  case Match_InvalidMemoryIndexed8UImm6:
4455  case Match_InvalidMemoryIndexedSImm6:
4456  case Match_InvalidMemoryIndexedSImm5:
4457  case Match_InvalidMemoryIndexedSImm9:
4458  case Match_InvalidMemoryIndexed8SImm10:
4459  case Match_InvalidImm0_1:
4460  case Match_InvalidImm0_7:
4461  case Match_InvalidImm0_15:
4462  case Match_InvalidImm0_31:
4463  case Match_InvalidImm0_63:
4464  case Match_InvalidImm0_127:
4465  case Match_InvalidImm0_255:
4466  case Match_InvalidImm0_65535:
4467  case Match_InvalidImm1_8:
4468  case Match_InvalidImm1_16:
4469  case Match_InvalidImm1_32:
4470  case Match_InvalidImm1_64:
4471  case Match_InvalidSVEAddSubImm8:
4472  case Match_InvalidSVEAddSubImm16:
4473  case Match_InvalidSVEAddSubImm32:
4474  case Match_InvalidSVEAddSubImm64:
4475  case Match_InvalidSVECpyImm8:
4476  case Match_InvalidSVECpyImm16:
4477  case Match_InvalidSVECpyImm32:
4478  case Match_InvalidSVECpyImm64:
4479  case Match_InvalidIndexRange1_1:
4480  case Match_InvalidIndexRange0_15:
4481  case Match_InvalidIndexRange0_7:
4482  case Match_InvalidIndexRange0_3:
4483  case Match_InvalidIndexRange0_1:
4484  case Match_InvalidSVEIndexRange0_63:
4485  case Match_InvalidSVEIndexRange0_31:
4486  case Match_InvalidSVEIndexRange0_15:
4487  case Match_InvalidSVEIndexRange0_7:
4488  case Match_InvalidSVEIndexRange0_3:
4489  case Match_InvalidLabel:
4490  case Match_InvalidComplexRotationEven:
4491  case Match_InvalidComplexRotationOdd:
4492  case Match_InvalidGPR64shifted8:
4493  case Match_InvalidGPR64shifted16:
4494  case Match_InvalidGPR64shifted32:
4495  case Match_InvalidGPR64shifted64:
4496  case Match_InvalidGPR64NoXZRshifted8:
4497  case Match_InvalidGPR64NoXZRshifted16:
4498  case Match_InvalidGPR64NoXZRshifted32:
4499  case Match_InvalidGPR64NoXZRshifted64:
4500  case Match_InvalidZPR32UXTW8:
4501  case Match_InvalidZPR32UXTW16:
4502  case Match_InvalidZPR32UXTW32:
4503  case Match_InvalidZPR32UXTW64:
4504  case Match_InvalidZPR32SXTW8:
4505  case Match_InvalidZPR32SXTW16:
4506  case Match_InvalidZPR32SXTW32:
4507  case Match_InvalidZPR32SXTW64:
4508  case Match_InvalidZPR64UXTW8:
4509  case Match_InvalidZPR64SXTW8:
4510  case Match_InvalidZPR64UXTW16:
4511  case Match_InvalidZPR64SXTW16:
4512  case Match_InvalidZPR64UXTW32:
4513  case Match_InvalidZPR64SXTW32:
4514  case Match_InvalidZPR64UXTW64:
4515  case Match_InvalidZPR64SXTW64:
4516  case Match_InvalidZPR64LSL8:
4517  case Match_InvalidZPR64LSL16:
4518  case Match_InvalidZPR64LSL32:
4519  case Match_InvalidZPR64LSL64:
4520  case Match_InvalidZPR0:
4521  case Match_InvalidZPR8:
4522  case Match_InvalidZPR16:
4523  case Match_InvalidZPR32:
4524  case Match_InvalidZPR64:
4525  case Match_InvalidZPR128:
4526  case Match_InvalidSVEPredicateAnyReg:
4527  case Match_InvalidSVEPattern:
4528  case Match_InvalidSVEPredicateBReg:
4529  case Match_InvalidSVEPredicateHReg:
4530  case Match_InvalidSVEPredicateSReg:
4531  case Match_InvalidSVEPredicateDReg:
4532  case Match_InvalidSVEPredicate3bAnyReg:
4533  case Match_InvalidSVEPredicate3bBReg:
4534  case Match_InvalidSVEPredicate3bHReg:
4535  case Match_InvalidSVEPredicate3bSReg:
4536  case Match_InvalidSVEPredicate3bDReg:
4537  case Match_InvalidSVEExactFPImmOperandHalfOne:
4538  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4539  case Match_InvalidSVEExactFPImmOperandZeroOne:
4540  case Match_MSR:
4541  case Match_MRS: {
4542  if (ErrorInfo >= Operands.size())
4543  return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4544  // Any time we get here, there's nothing fancy to do. Just get the
4545  // operand SMLoc and display the diagnostic.
4546  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4547  if (ErrorLoc == SMLoc())
4548  ErrorLoc = IDLoc;
4549  return showMatchError(ErrorLoc, MatchResult, Operands);
4550  }
4551  }
4552 
4553  llvm_unreachable("Implement any new match types added!");
4554 }
4555 
4556 /// ParseDirective parses the arm specific directives
4557 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4559  getContext().getObjectFileInfo()->getObjectFileType();
4560  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4561  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4562 
4563  StringRef IDVal = DirectiveID.getIdentifier();
4564  SMLoc Loc = DirectiveID.getLoc();
4565  if (IDVal == ".arch")
4566  parseDirectiveArch(Loc);
4567  else if (IDVal == ".cpu")
4568  parseDirectiveCPU(Loc);
4569  else if (IDVal == ".tlsdesccall")
4570  parseDirectiveTLSDescCall(Loc);
4571  else if (IDVal == ".ltorg" || IDVal == ".pool")
4572  parseDirectiveLtorg(Loc);
4573  else if (IDVal == ".unreq")
4574  parseDirectiveUnreq(Loc);
4575  else if (!IsMachO && !IsCOFF) {
4576  if (IDVal == ".inst")
4577  parseDirectiveInst(Loc);
4578  else
4579  return true;
4580  } else if (IDVal == MCLOHDirectiveName())
4581  parseDirectiveLOH(IDVal, Loc);
4582  else
4583  return true;
4584  return false;
4585 }
4586 
4587 static const struct {
4588  const char *Name;
4590 } ExtensionMap[] = {
4591  { "crc", {AArch64::FeatureCRC} },
4592  { "crypto", {AArch64::FeatureCrypto} },
4593  { "fp", {AArch64::FeatureFPARMv8} },
4594  { "simd", {AArch64::FeatureNEON} },
4595  { "ras", {AArch64::FeatureRAS} },
4596  { "lse", {AArch64::FeatureLSE} },
4597 
4598  // FIXME: Unsupported extensions
4599  { "pan", {} },
4600  { "lor", {} },
4601  { "rdma", {} },
4602  { "profile", {} },
4603 };
4604 
4605 /// parseDirectiveArch
4606 /// ::= .arch token
4607 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4608  SMLoc ArchLoc = getLoc();
4609 
4610  StringRef Arch, ExtensionString;
4611  std::tie(Arch, ExtensionString) =
4612  getParser().parseStringToEndOfStatement().trim().split('+');
4613 
4615  if (ID == AArch64::ArchKind::INVALID)
4616  return Error(ArchLoc, "unknown arch name");
4617 
4618  if (parseToken(AsmToken::EndOfStatement))
4619  return true;
4620 
4621  // Get the architecture and extension features.
4622  std::vector<StringRef> AArch64Features;
4623  AArch64::getArchFeatures(ID, AArch64Features);
4625  AArch64Features);
4626 
4627  MCSubtargetInfo &STI = copySTI();
4628  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4629  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4630 
4631  SmallVector<StringRef, 4> RequestedExtensions;
4632  if (!ExtensionString.empty())
4633  ExtensionString.split(RequestedExtensions, '+');
4634 
4636  for (auto Name : RequestedExtensions) {
4637  bool EnableFeature = true;
4638 
4639  if (Name.startswith_lower("no")) {
4640  EnableFeature = false;
4641  Name = Name.substr(2);
4642  }
4643 
4644  for (const auto &Extension : ExtensionMap) {
4645  if (Extension.Name != Name)
4646  continue;
4647 
4648  if (Extension.Features.none())
4649  report_fatal_error("unsupported architectural extension: " + Name);
4650 
4651  FeatureBitset ToggleFeatures = EnableFeature
4652  ? (~Features & Extension.Features)
4653  : ( Features & Extension.Features);
4654  uint64_t Features =
4655  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4656  setAvailableFeatures(Features);
4657  break;
4658  }
4659  }
4660  return false;
4661 }
4662 
4663 static SMLoc incrementLoc(SMLoc L, int Offset) {
4664  return SMLoc::getFromPointer(L.getPointer() + Offset);
4665 }
4666 
4667 /// parseDirectiveCPU
4668 /// ::= .cpu id
4669 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4670  SMLoc CurLoc = getLoc();
4671 
4672  StringRef CPU, ExtensionString;
4673  std::tie(CPU, ExtensionString) =
4674  getParser().parseStringToEndOfStatement().trim().split('+');
4675 
4676  if (parseToken(AsmToken::EndOfStatement))
4677  return true;
4678 
4679  SmallVector<StringRef, 4> RequestedExtensions;
4680  if (!ExtensionString.empty())
4681  ExtensionString.split(RequestedExtensions, '+');
4682 
4683  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4684  // once that is tablegen'ed
4685  if (!getSTI().isCPUStringValid(CPU)) {
4686  Error(CurLoc, "unknown CPU name");
4687  return false;
4688  }
4689 
4690  MCSubtargetInfo &STI = copySTI();
4691  STI.setDefaultFeatures(CPU, "");
4692  CurLoc = incrementLoc(CurLoc, CPU.size());
4693 
4695  for (auto Name : RequestedExtensions) {
4696  // Advance source location past '+'.
4697  CurLoc = incrementLoc(CurLoc, 1);
4698 
4699  bool EnableFeature = true;
4700 
4701  if (Name.startswith_lower("no")) {
4702  EnableFeature = false;
4703  Name = Name.substr(2);
4704  }
4705 
4706  bool FoundExtension = false;
4707  for (const auto &Extension : ExtensionMap) {
4708  if (Extension.Name != Name)
4709  continue;
4710 
4711  if (Extension.Features.none())
4712  report_fatal_error("unsupported architectural extension: " + Name);
4713 
4714  FeatureBitset ToggleFeatures = EnableFeature
4715  ? (~Features & Extension.Features)
4716  : ( Features & Extension.Features);
4717  uint64_t Features =
4718  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4719  setAvailableFeatures(Features);
4720  FoundExtension = true;
4721 
4722  break;
4723  }
4724 
4725  if (!FoundExtension)
4726  Error(CurLoc, "unsupported architectural extension");
4727 
4728  CurLoc = incrementLoc(CurLoc, Name.size());
4729  }
4730  return false;
4731 }
4732 
4733 /// parseDirectiveInst
4734 /// ::= .inst opcode [, ...]
4735 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4736  if (getLexer().is(AsmToken::EndOfStatement))
4737  return Error(Loc, "expected expression following '.inst' directive");
4738 
4739  auto parseOp = [&]() -> bool {
4740  SMLoc L = getLoc();
4741  const MCExpr *Expr;
4742  if (check(getParser().parseExpression(Expr), L, "expected expression"))
4743  return true;
4744  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4745  if (check(!Value, L, "expected constant expression"))
4746  return true;
4747  getTargetStreamer().emitInst(Value->getValue());
4748  return false;
4749  };
4750 
4751  if (parseMany(parseOp))
4752  return addErrorSuffix(" in '.inst' directive");
4753  return false;
4754 }
4755 
4756 // parseDirectiveTLSDescCall:
4757 // ::= .tlsdesccall symbol
4758 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4759  StringRef Name;
4760  if (check(getParser().parseIdentifier(Name), L,
4761  "expected symbol after directive") ||
4762  parseToken(AsmToken::EndOfStatement))
4763  return true;
4764 
4765  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4766  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4767  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4768 
4769  MCInst Inst;
4770  Inst.setOpcode(AArch64::TLSDESCCALL);
4771  Inst.addOperand(MCOperand::createExpr(Expr));
4772 
4773  getParser().getStreamer().EmitInstruction(Inst, getSTI());
4774  return false;
4775 }
4776 
4777 /// ::= .loh <lohName | lohId> label1, ..., labelN
4778 /// The number of arguments depends on the loh identifier.
4779 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4780  MCLOHType Kind;
4781  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4782  if (getParser().getTok().isNot(AsmToken::Integer))
4783  return TokError("expected an identifier or a number in directive");
4784  // We successfully get a numeric value for the identifier.
4785  // Check if it is valid.
4786  int64_t Id = getParser().getTok().getIntVal();
4787  if (Id <= -1U && !isValidMCLOHType(Id))
4788  return TokError("invalid numeric identifier in directive");
4789  Kind = (MCLOHType)Id;
4790  } else {
4791  StringRef Name = getTok().getIdentifier();
4792  // We successfully parse an identifier.
4793  // Check if it is a recognized one.
4794  int Id = MCLOHNameToId(Name);
4795 
4796  if (Id == -1)
4797  return TokError("invalid identifier in directive");
4798  Kind = (MCLOHType)Id;
4799  }
4800  // Consume the identifier.
4801  Lex();
4802  // Get the number of arguments of this LOH.
4803  int NbArgs = MCLOHIdToNbArgs(Kind);
4804 
4805  assert(NbArgs != -1 && "Invalid number of arguments");
4806 
4808  for (int Idx = 0; Idx < NbArgs; ++Idx) {
4809  StringRef Name;
4810  if (getParser().parseIdentifier(Name))
4811  return TokError("expected identifier in directive");
4812  Args.push_back(getContext().getOrCreateSymbol(Name));
4813 
4814  if (Idx + 1 == NbArgs)
4815  break;
4816  if (parseToken(AsmToken::Comma,
4817  "unexpected token in '" + Twine(IDVal) + "' directive"))
4818  return true;
4819  }
4820  if (parseToken(AsmToken::EndOfStatement,
4821  "unexpected token in '" + Twine(IDVal) + "' directive"))
4822  return true;
4823 
4824  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4825  return false;
4826 }
4827 
4828 /// parseDirectiveLtorg
4829 /// ::= .ltorg | .pool
4830 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4831  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4832  return true;
4833  getTargetStreamer().emitCurrentConstantPool();
4834  return false;
4835 }
4836 
4837 /// parseDirectiveReq
4838 /// ::= name .req registername
4839 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4840  MCAsmParser &Parser = getParser();
4841  Parser.Lex(); // Eat the '.req' token.
4842  SMLoc SRegLoc = getLoc();
4844  unsigned RegNum;
4845  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
4846 
4847  if (ParseRes != MatchOperand_Success) {
4848  StringRef Kind;
4849  RegisterKind = RegKind::NeonVector;
4850  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
4851 
4852  if (ParseRes == MatchOperand_ParseFail)
4853  return true;
4854 
4855  if (ParseRes == MatchOperand_Success && !Kind.empty())
4856  return Error(SRegLoc, "vector register without type specifier expected");
4857  }
4858 
4859  if (ParseRes != MatchOperand_Success) {
4860  StringRef Kind;
4861  RegisterKind = RegKind::SVEDataVector;
4862  ParseRes =
4863  tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
4864 
4865  if (ParseRes == MatchOperand_ParseFail)
4866  return true;
4867 
4868  if (ParseRes == MatchOperand_Success && !Kind.empty())
4869  return Error(SRegLoc,
4870  "sve vector register without type specifier expected");
4871  }
4872 
4873  if (ParseRes != MatchOperand_Success) {
4874  StringRef Kind;
4875  RegisterKind = RegKind::SVEPredicateVector;
4876  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
4877 
4878  if (ParseRes == MatchOperand_ParseFail)
4879  return true;
4880 
4881  if (ParseRes == MatchOperand_Success && !Kind.empty())
4882  return Error(SRegLoc,
4883  "sve predicate register without type specifier expected");
4884  }
4885 
4886  if (ParseRes != MatchOperand_Success)
4887  return Error(SRegLoc, "register name or alias expected");
4888 
4889  // Shouldn't be anything else.
4890  if (parseToken(AsmToken::EndOfStatement,
4891  "unexpected input in .req directive"))
4892  return true;
4893 
4894  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
4895  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4896  Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4897 
4898  return false;
4899 }
4900 
4901 /// parseDirectiveUneq
4902 /// ::= .unreq registername
4903 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4904  MCAsmParser &Parser = getParser();
4905  if (getTok().isNot(AsmToken::Identifier))
4906  return TokError("unexpected input in .unreq directive.");
4907  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4908  Parser.Lex(); // Eat the identifier.
4909  if (parseToken(AsmToken::EndOfStatement))
4910  return addErrorSuffix("in '.unreq' directive");
4911  return false;
4912 }
4913 
4914 bool
4915 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4916  AArch64MCExpr::VariantKind &ELFRefKind,
4917  MCSymbolRefExpr::VariantKind &DarwinRefKind,
4918  int64_t &Addend) {
4919  ELFRefKind = AArch64MCExpr::VK_INVALID;
4920  DarwinRefKind = MCSymbolRefExpr::VK_None;
4921  Addend = 0;
4922 
4923  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4924  ELFRefKind = AE->getKind();
4925  Expr = AE->getSubExpr();
4926  }
4927 
4928  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4929  if (SE) {
4930  // It's a simple symbol reference with no addend.
4931  DarwinRefKind = SE->getKind();
4932  return true;
4933  }
4934 
4935  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4936  if (!BE)
4937  return false;
4938 
4939  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4940  if (!SE)
4941  return false;
4942  DarwinRefKind = SE->getKind();
4943 
4944  if (BE->getOpcode() != MCBinaryExpr::Add &&
4945  BE->getOpcode() != MCBinaryExpr::Sub)
4946  return false;
4947 
4948  // See if the addend is a constant, otherwise there's more going
4949  // on here than we can deal with.
4950  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4951  if (!AddendExpr)
4952  return false;
4953 
4954  Addend = AddendExpr->getValue();
4955  if (BE->getOpcode() == MCBinaryExpr::Sub)
4956  Addend = -Addend;
4957 
4958  // It's some symbol reference + a constant addend, but really
4959  // shouldn't use both Darwin and ELF syntax.
4960  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4961  DarwinRefKind == MCSymbolRefExpr::VK_None;
4962 }
4963 
4964 /// Force static initialization.
4969 }
4970 
4971 #define GET_REGISTER_MATCHER
4972 #define GET_SUBTARGET_FEATURE_NAME
4973 #define GET_MATCHER_IMPLEMENTATION
4974 #define GET_MNEMONIC_SPELL_CHECKER
4975 #include "AArch64GenAsmMatcher.inc"
4976 
4977 // Define this matcher function after the auto-generated include so we
4978 // have the match class enum definitions.
4979 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4980  unsigned Kind) {
4981  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4982  // If the kind is a token for a literal immediate, check if our asm
4983  // operand matches. This is for InstAliases which have a fixed-value
4984  // immediate in the syntax.
4985  int64_t ExpectedVal;
4986  switch (Kind) {
4987  default:
4988  return Match_InvalidOperand;
4989  case MCK__35_0:
4990  ExpectedVal = 0;
4991  break;
4992  case MCK__35_1:
4993  ExpectedVal = 1;
4994  break;
4995  case MCK__35_12:
4996  ExpectedVal = 12;
4997  break;
4998  case MCK__35_16:
4999  ExpectedVal = 16;
5000  break;
5001  case MCK__35_2:
5002  ExpectedVal = 2;
5003  break;
5004  case MCK__35_24:
5005  ExpectedVal = 24;
5006  break;
5007  case MCK__35_3:
5008  ExpectedVal = 3;
5009  break;
5010  case MCK__35_32:
5011  ExpectedVal = 32;
5012  break;
5013  case MCK__35_4:
5014  ExpectedVal = 4;
5015  break;
5016  case MCK__35_48:
5017  ExpectedVal = 48;
5018  break;
5019  case MCK__35_6:
5020  ExpectedVal = 6;
5021  break;
5022  case MCK__35_64:
5023  ExpectedVal = 64;
5024  break;
5025  case MCK__35_8:
5026  ExpectedVal = 8;
5027  break;
5028  }
5029  if (!Op.isImm())
5030  return Match_InvalidOperand;
5031  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5032  if (!CE)
5033  return Match_InvalidOperand;
5034  if (CE->getValue() == ExpectedVal)
5035  return Match_Success;
5036  return Match_InvalidOperand;
5037 }
5038 
5040 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5041 
5042  SMLoc S = getLoc();
5043 
5044  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5045  Error(S, "expected register");
5046  return MatchOperand_ParseFail;
5047  }
5048 
5049  unsigned FirstReg;
5050  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5051  if (Res != MatchOperand_Success)
5052  return MatchOperand_ParseFail;
5053 
5054  const MCRegisterClass &WRegClass =
5055  AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5056  const MCRegisterClass &XRegClass =
5057  AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5058 
5059  bool isXReg = XRegClass.contains(FirstReg),
5060  isWReg = WRegClass.contains(FirstReg);
5061  if (!isXReg && !isWReg) {
5062  Error(S, "expected first even register of a "
5063  "consecutive same-size even/odd register pair");
5064  return MatchOperand_ParseFail;
5065  }
5066 
5067  const MCRegisterInfo *RI = getContext().getRegisterInfo();
5068  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5069 
5070  if (FirstEncoding & 0x1) {
5071  Error(S, "expected first even register of a "
5072  "consecutive same-size even/odd register pair");
5073  return MatchOperand_ParseFail;
5074  }
5075 
5076  if (getParser().getTok().isNot(AsmToken::Comma)) {
5077  Error(getLoc(), "expected comma");
5078  return MatchOperand_ParseFail;
5079  }
5080  // Eat the comma
5081  getParser().Lex();
5082 
5083  SMLoc E = getLoc();
5084  unsigned SecondReg;
5085  Res = tryParseScalarRegister(SecondReg);
5086  if (Res != MatchOperand_Success)
5087  return MatchOperand_ParseFail;
5088 
5089  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5090  (isXReg && !XRegClass.contains(SecondReg)) ||
5091  (isWReg && !WRegClass.contains(SecondReg))) {
5092  Error(E,"expected second odd register of a "
5093  "consecutive same-size even/odd register pair");
5094  return MatchOperand_ParseFail;
5095  }
5096 
5097  unsigned Pair = 0;
5098  if (isXReg) {
5099  Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5100  &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5101  } else {
5102  Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5103  &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5104  }
5105 
5106  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5107  getLoc(), getContext()));
5108 
5109  return MatchOperand_Success;
5110 }
5111 
5112 template <bool ParseShiftExtend, bool ParseSuffix>
5114 AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5115  const SMLoc S = getLoc();
5116  // Check for a SVE vector register specifier first.
5117  unsigned RegNum;
5118  StringRef Kind;
5119 
5120  OperandMatchResultTy Res =
5121  tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5122 
5123  if (Res != MatchOperand_Success)
5124  return Res;
5125 
5126  if (ParseSuffix && Kind.empty())
5127  return MatchOperand_NoMatch;
5128 
5129  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5130  if (!KindRes)
5131  return MatchOperand_NoMatch;
5132 
5133  unsigned ElementWidth = KindRes->second;
5134 
5135  // No shift/extend is the default.
5136  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5137  Operands.push_back(AArch64Operand::CreateVectorReg(
5138  RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5139 
5140  OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5141  if (Res == MatchOperand_ParseFail)
5142  return MatchOperand_ParseFail;
5143  return MatchOperand_Success;
5144  }
5145 
5146  // Eat the comma
5147  getParser().Lex();
5148 
5149  // Match the shift
5151  Res = tryParseOptionalShiftExtend(ExtOpnd);
5152  if (Res != MatchOperand_Success)
5153  return Res;
5154 
5155  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5156  Operands.push_back(AArch64Operand::CreateVectorReg(
5157  RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5158  getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5159  Ext->hasShiftExtendAmount()));
5160 
5161  return MatchOperand_Success;
5162 }
5163 
5165 AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5166  MCAsmParser &Parser = getParser();
5167 
5168  SMLoc SS = getLoc();
5169  const AsmToken &TokE = Parser.getTok();
5170  bool IsHash = TokE.is(AsmToken::Hash);
5171 
5172  if (!IsHash && TokE.isNot(AsmToken::Identifier))
5173  return MatchOperand_NoMatch;
5174 
5175  int64_t Pattern;
5176  if (IsHash) {
5177  Parser.Lex(); // Eat hash
5178 
5179  // Parse the immediate operand.
5180  const MCExpr *ImmVal;
5181  SS = getLoc();
5182  if (Parser.parseExpression(ImmVal))
5183  return MatchOperand_ParseFail;
5184 
5185  auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5186  if (!MCE)
5187  return MatchOperand_ParseFail;
5188 
5189  Pattern = MCE->getValue();
5190  } else {
5191  // Parse the pattern
5192  auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5193  if (!Pat)
5194  return MatchOperand_NoMatch;
5195 
5196  Parser.Lex();
5197  Pattern = Pat->Encoding;
5198  assert(Pattern >= 0 && Pattern < 32);
5199  }
5200 
5201  Operands.push_back(
5202  AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5203  SS, getLoc(), getContext()));
5204 
5205  return MatchOperand_Success;
5206 }
static bool isReg(const MCInst &MI, unsigned OpNo)
Represents a range in source code.
Definition: SMLoc.h:49
void push_back(const T &Elt)
Definition: SmallVector.h:213
Target & getTheAArch64beTarget()
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static float getFPImmFloat(unsigned Imm)
LLVM_NODISCARD bool startswith_lower(StringRef Prefix) const
Check if this string starts with the given Prefix, ignoring case.
Definition: StringRef.cpp:47
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmMacro.h:111
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:321
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
VariantKind getKind() const
Definition: MCExpr.h:336
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
Definition: StringRef.h:176
static const AArch64MCExpr * create(const MCExpr *Expr, VariantKind Kind, MCContext &Ctx)
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:137
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:110
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Target & getTheAArch64leTarget()
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:137
MCTargetAsmParser - Generic interface to target specific assembly parsers.
static CondCode getInvertedCondCode(CondCode Code)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
Target specific streamer interface.
Definition: MCStreamer.h:83
unsigned Reg
bool isNot(TokenKind K) const
Definition: MCAsmMacro.h:84
const MCExpr * getLHS() const
Get the left-hand side expression of the binary operator.
Definition: MCExpr.h:562
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
static unsigned getXRegFromWReg(unsigned Reg)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
F(f)
void changeSign()
Definition: APFloat.h:1050
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:34
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, bool PrintSchedInfo=false)
Emit the given Instruction into the current section.
Definition: MCStreamer.cpp:907
return AArch64::GPR64RegClass contains(Reg)
static SMLoc incrementLoc(SMLoc L, int Offset)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string...
Definition: MCAsmMacro.h:100
opStatus convertFromString(StringRef, roundingMode)
Definition: APFloat.cpp:4427
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:116
static ManagedStatic< DebugCounter > DC
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
bool contains(unsigned Reg) const
contains - Return true if the specified register is included in this register class.
const FeatureBitset & getFeatureBits() const
RegisterKind
static bool isSVEAddSubImm(int64_t Imm)
Returns true if Imm is valid for ADD/SUB.
static Optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (#elements, element-width) if Suffix is a valid vector kind...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:36
Target independent representation for an assembler token.
Definition: MCAsmMacro.h:22
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:166
bool getExtensionFeatures(unsigned Extensions, std::vector< StringRef > &Features)
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
Definition: StringExtras.h:350
Target & getTheARM64Target()
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(T Value)
Definition: StringSwitch.h:203
static bool isSVECpyImm(int64_t Imm)
Returns true if Imm is valid for CPY/DUP.
static bool isMem(const MachineInstr &MI, unsigned Op)
Definition: X86InstrInfo.h:160
zlib-gnu style compression
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand...
This file implements a class to represent arbitrary precision integral constant values and operations...
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:65
AArch64::ArchKind parseArch(StringRef Arch)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
Context object for machine code objects.
Definition: MCContext.h:63
std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from Source, ignoring any leading characters that appear in Delimiters.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool startswith(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:267
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1559
RegisterMCAsmParser - Helper template for registering a target specific assembly parser, for use in the target machine initialization function.
const MCExpr * getRHS() const
Get the right-hand side expression of the binary operator.
Definition: MCExpr.h:565
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:133
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:28