//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector,
  Matrix
};

enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseMSRSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
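    //
    // For example, in "ldr x0, [x1, w2, sxtw #3]" the "w2, sxtw #3" part is
    // parsed as a single register operand whose ShiftExtend records the sxtw
    // extend together with the explicit shift amount of 3.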
    ShiftExtendOp ShiftExtend;
  };

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // Describes whether the parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }

  template <int Bits, int Scale>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    int64_t Val = MCE->getValue();
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented
  // as a logical immediate can always be represented when inverted.
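  // For example, for T = uint32_t, 0x00ff00ff is a valid logical immediate,
  // and so is its bitwise NOT 0xff00ff00.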
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
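  /// For example, with Width == 12 a constant of 0x123000 yields the pair
  /// (0x123, 12), while a constant of 0x123 yields the pair (0x123, 0).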
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
             (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) ||
             ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
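  // For example, for .h elements both "mov z0.h, p0/m, #-32768" (-128 << 8)
  // and "mov z0.h, p0/m, #32512" (127 << 8) are in range.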
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
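  // For example, "add z0.h, z0.h, #65280" (255 << 8) is in range for .h
  // elements, while for .b elements only 0 to 255 is accepted.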
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template <int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) &&
            Val <= (((1 << (N - 1)) - 1) << 2));
  }

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template <int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }

  template <int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  template <int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template <int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
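  // For example, "ldr x0, [x1, #-8]" has an offset that is not legal for the
  // scaled LDR form, so it matches here instead and is emitted as the
  // equivalent "ldur x0, [x1, #-8]".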
  template <int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
        /* DReg */ { AArch64::Q0,
                     AArch64::D0, AArch64::D0_D1,
                     AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
        /* QReg */ { AArch64::Q0,
                     AArch64::Q0, AArch64::Q0_Q1,
                     AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
        /* ZReg */ { AArch64::Z0,
                     AArch64::Z0, AArch64::Z0_Z1,
                     AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           "NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }

  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  template <int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
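    // For example, a resolved constant branch target of 8 (bytes) is
    // encoded as the immediate 8 >> 2 == 2.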
1783  assert(N == 1 && "Invalid number of operands!");
1784  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1785  if (!MCE) {
1786  addExpr(Inst, getImm());
1787  return;
1788  }
1789  assert(MCE && "Invalid constant immediate operand!");
1790  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1791  }
1792 
1793  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1794  // Branch operands don't encode the low bits, so shift them off
1795  // here. If it's a label, however, just put it on directly as there's
1796  // not enough information now to do anything.
1797  assert(N == 1 && "Invalid number of operands!");
1798  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1799  if (!MCE) {
1800  addExpr(Inst, getImm());
1801  return;
1802  }
1803  assert(MCE && "Invalid constant immediate operand!");
1804  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1805  }
1806 
1807  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1808  // Branch operands don't encode the low bits, so shift them off
1809  // here. If it's a label, however, just put it on directly as there's
1810  // not enough information now to do anything.
1811  assert(N == 1 && "Invalid number of operands!");
1812  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1813  if (!MCE) {
1814  addExpr(Inst, getImm());
1815  return;
1816  }
1817  assert(MCE && "Invalid constant immediate operand!");
1818  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1819  }
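 
  // Illustrative note (editorial comment, not from the LLVM source): the
  // three branch-target forms above all drop the low two bits because A64
  // instructions are 4-byte aligned; a constant byte offset of +8 is thus
  // encoded as the word offset 2.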
1820 
1821  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1822  assert(N == 1 && "Invalid number of operands!");
 1823  Inst.addOperand(MCOperand::createImm(
 1824  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1825  }
1826 
1827  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1828  assert(N == 1 && "Invalid number of operands!");
1829  Inst.addOperand(MCOperand::createImm(getBarrier()));
1830  }
1831 
1832  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1833  assert(N == 1 && "Invalid number of operands!");
1834  Inst.addOperand(MCOperand::createImm(getBarrier()));
1835  }
1836 
1837  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1838  assert(N == 1 && "Invalid number of operands!");
1839 
1840  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1841  }
1842 
1843  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1844  assert(N == 1 && "Invalid number of operands!");
1845 
1846  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1847  }
1848 
1849  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1850  assert(N == 1 && "Invalid number of operands!");
1851 
1852  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1853  }
1854 
1855  void addSVCROperands(MCInst &Inst, unsigned N) const {
1856  assert(N == 1 && "Invalid number of operands!");
1857 
1858  Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1859  }
1860 
1861  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1862  assert(N == 1 && "Invalid number of operands!");
1863 
1864  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1865  }
1866 
1867  void addSysCROperands(MCInst &Inst, unsigned N) const {
1868  assert(N == 1 && "Invalid number of operands!");
1869  Inst.addOperand(MCOperand::createImm(getSysCR()));
1870  }
1871 
1872  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1873  assert(N == 1 && "Invalid number of operands!");
1874  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1875  }
1876 
1877  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1878  assert(N == 1 && "Invalid number of operands!");
1879  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1880  }
1881 
1882  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1883  assert(N == 1 && "Invalid number of operands!");
1884  Inst.addOperand(MCOperand::createImm(getBTIHint()));
1885  }
1886 
1887  void addShifterOperands(MCInst &Inst, unsigned N) const {
1888  assert(N == 1 && "Invalid number of operands!");
1889  unsigned Imm =
1890  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1891  Inst.addOperand(MCOperand::createImm(Imm));
1892  }
1893 
1894  void addExtendOperands(MCInst &Inst, unsigned N) const {
1895  assert(N == 1 && "Invalid number of operands!");
1896  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1897  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1898  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1899  Inst.addOperand(MCOperand::createImm(Imm));
1900  }
1901 
1902  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1903  assert(N == 1 && "Invalid number of operands!");
1904  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1905  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1906  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1907  Inst.addOperand(MCOperand::createImm(Imm));
1908  }
1909 
1910  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1911  assert(N == 2 && "Invalid number of operands!");
1912  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1913  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1914  Inst.addOperand(MCOperand::createImm(IsSigned));
1915  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1916  }
1917 
1918  // For 8-bit load/store instructions with a register offset, both the
1919  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1920  // they're disambiguated by whether the shift was explicit or implicit rather
1921  // than its size.
1922  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1923  assert(N == 2 && "Invalid number of operands!");
1924  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1925  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1926  Inst.addOperand(MCOperand::createImm(IsSigned));
1927  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1928  }
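 
  // Illustrative note (editorial comment, not from the LLVM source):
  // "ldrb w0, [x1, w2, sxtw]" and "ldrb w0, [x1, w2, sxtw #0]" both have a
  // zero shift amount, but only the second spells it out explicitly, which
  // is what selects the "DoShift" variant.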
1929 
1930  template<int Shift>
1931  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1932  assert(N == 1 && "Invalid number of operands!");
1933 
1934  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1935  if (CE) {
1936  uint64_t Value = CE->getValue();
1937  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1938  } else {
1939  addExpr(Inst, getImm());
1940  }
1941  }
1942 
1943  template<int Shift>
1944  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1945  assert(N == 1 && "Invalid number of operands!");
1946 
1947  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1948  uint64_t Value = CE->getValue();
1949  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1950  }
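 
  // Illustrative note (editorial comment, not from the LLVM source): the
  // MOVN alias stores the inverted constant, so "mov x0, #-2" becomes
  // "movn x0, #1", since (~(uint64_t)-2 >> 0) & 0xffff == 1.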
1951 
1952  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1953  assert(N == 1 && "Invalid number of operands!");
1954  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1955  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1956  }
1957 
1958  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1959  assert(N == 1 && "Invalid number of operands!");
1960  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1961  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1962  }
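 
  // Illustrative note (editorial comment, not from the LLVM source): FCMLA
  // accepts rotations 0/90/180/270, which the "even" conversion maps to
  // 0-3; FCADD accepts 90/270, which the "odd" conversion maps to 0-1
  // ((90 - 90) / 180 == 0 and (270 - 90) / 180 == 1).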
1963 
1964  void print(raw_ostream &OS) const override;
1965 
1966  static std::unique_ptr<AArch64Operand>
1967  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1968  auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1969  Op->Tok.Data = Str.data();
1970  Op->Tok.Length = Str.size();
1971  Op->Tok.IsSuffix = IsSuffix;
1972  Op->StartLoc = S;
1973  Op->EndLoc = S;
1974  return Op;
1975  }
1976 
1977  static std::unique_ptr<AArch64Operand>
1978  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1979  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
 1980  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
 1981  unsigned ShiftAmount = 0,
1982  unsigned HasExplicitAmount = false) {
1983  auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1984  Op->Reg.RegNum = RegNum;
1985  Op->Reg.Kind = Kind;
1986  Op->Reg.ElementWidth = 0;
1987  Op->Reg.EqualityTy = EqTy;
1988  Op->Reg.ShiftExtend.Type = ExtTy;
1989  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1990  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1991  Op->StartLoc = S;
1992  Op->EndLoc = E;
1993  return Op;
1994  }
1995 
1996  static std::unique_ptr<AArch64Operand>
1997  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1998  SMLoc S, SMLoc E, MCContext &Ctx,
 1999  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
 2000  unsigned ShiftAmount = 0,
2001  unsigned HasExplicitAmount = false) {
2002  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2003  Kind == RegKind::SVEPredicateVector) &&
2004  "Invalid vector kind");
2005  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2006  HasExplicitAmount);
2007  Op->Reg.ElementWidth = ElementWidth;
2008  return Op;
2009  }
2010 
2011  static std::unique_ptr<AArch64Operand>
2012  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
2013  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
2014  MCContext &Ctx) {
2015  auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2016  Op->VectorList.RegNum = RegNum;
2017  Op->VectorList.Count = Count;
2018  Op->VectorList.NumElements = NumElements;
2019  Op->VectorList.ElementWidth = ElementWidth;
2020  Op->VectorList.RegisterKind = RegisterKind;
2021  Op->StartLoc = S;
2022  Op->EndLoc = E;
2023  return Op;
2024  }
2025 
2026  static std::unique_ptr<AArch64Operand>
2027  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2028  auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2029  Op->VectorIndex.Val = Idx;
2030  Op->StartLoc = S;
2031  Op->EndLoc = E;
2032  return Op;
2033  }
2034 
2035  static std::unique_ptr<AArch64Operand>
2036  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2037  auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2038  Op->MatrixTileList.RegMask = RegMask;
2039  Op->StartLoc = S;
2040  Op->EndLoc = E;
2041  return Op;
2042  }
2043 
2044  static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2045  const unsigned ElementWidth) {
2046  static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2047  RegMap = {
2048  {{0, AArch64::ZAB0},
2049  {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2050  AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2051  {{8, AArch64::ZAB0},
2052  {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2053  AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2054  {{16, AArch64::ZAH0},
2055  {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2056  {{16, AArch64::ZAH1},
2057  {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2058  {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2059  {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2060  {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2061  {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2062  };
2063 
2064  if (ElementWidth == 64)
2065  OutRegs.insert(Reg);
2066  else {
2067  std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2068  assert(!Regs.empty() && "Invalid tile or element width!");
2069  for (auto OutReg : Regs)
2070  OutRegs.insert(OutReg);
2071  }
2072  }
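 
  // Illustrative note (editorial comment, not from the LLVM source): the
  // map above expands an SME ZA tile into the 64-bit tiles it overlaps. For
  // example, za1.s covers ZAD1 and ZAD5, while za0.b (or the unqualified
  // tile) covers all eight ZAD registers.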
2073 
2074  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2075  SMLoc E, MCContext &Ctx) {
2076  auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2077  Op->Imm.Val = Val;
2078  Op->StartLoc = S;
2079  Op->EndLoc = E;
2080  return Op;
2081  }
2082 
2083  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2084  unsigned ShiftAmount,
2085  SMLoc S, SMLoc E,
2086  MCContext &Ctx) {
2087  auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
 2088  Op->ShiftedImm.Val = Val;
2089  Op->ShiftedImm.ShiftAmount = ShiftAmount;
2090  Op->StartLoc = S;
2091  Op->EndLoc = E;
2092  return Op;
2093  }
2094 
2095  static std::unique_ptr<AArch64Operand>
2096  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2097  auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2098  Op->CondCode.Code = Code;
2099  Op->StartLoc = S;
2100  Op->EndLoc = E;
2101  return Op;
2102  }
2103 
2104  static std::unique_ptr<AArch64Operand>
2105  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2106  auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2107  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2108  Op->FPImm.IsExact = IsExact;
2109  Op->StartLoc = S;
2110  Op->EndLoc = S;
2111  return Op;
2112  }
2113 
2114  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2115  StringRef Str,
2116  SMLoc S,
2117  MCContext &Ctx,
2118  bool HasnXSModifier) {
2119  auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2120  Op->Barrier.Val = Val;
2121  Op->Barrier.Data = Str.data();
2122  Op->Barrier.Length = Str.size();
2123  Op->Barrier.HasnXSModifier = HasnXSModifier;
2124  Op->StartLoc = S;
2125  Op->EndLoc = S;
2126  return Op;
2127  }
2128 
2129  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2130  uint32_t MRSReg,
2131  uint32_t MSRReg,
2132  uint32_t PStateField,
2133  MCContext &Ctx) {
2134  auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2135  Op->SysReg.Data = Str.data();
2136  Op->SysReg.Length = Str.size();
2137  Op->SysReg.MRSReg = MRSReg;
2138  Op->SysReg.MSRReg = MSRReg;
2139  Op->SysReg.PStateField = PStateField;
2140  Op->StartLoc = S;
2141  Op->EndLoc = S;
2142  return Op;
2143  }
2144 
2145  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2146  SMLoc E, MCContext &Ctx) {
2147  auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2148  Op->SysCRImm.Val = Val;
2149  Op->StartLoc = S;
2150  Op->EndLoc = E;
2151  return Op;
2152  }
2153 
2154  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2155  StringRef Str,
2156  SMLoc S,
2157  MCContext &Ctx) {
2158  auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2159  Op->Prefetch.Val = Val;
2160  Op->Barrier.Data = Str.data();
2161  Op->Barrier.Length = Str.size();
2162  Op->StartLoc = S;
2163  Op->EndLoc = S;
2164  return Op;
2165  }
2166 
2167  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2168  StringRef Str,
2169  SMLoc S,
2170  MCContext &Ctx) {
2171  auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2172  Op->PSBHint.Val = Val;
2173  Op->PSBHint.Data = Str.data();
2174  Op->PSBHint.Length = Str.size();
2175  Op->StartLoc = S;
2176  Op->EndLoc = S;
2177  return Op;
2178  }
2179 
2180  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2181  StringRef Str,
2182  SMLoc S,
2183  MCContext &Ctx) {
2184  auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2185  Op->BTIHint.Val = Val | 32;
2186  Op->BTIHint.Data = Str.data();
2187  Op->BTIHint.Length = Str.size();
2188  Op->StartLoc = S;
2189  Op->EndLoc = S;
2190  return Op;
2191  }
2192 
2193  static std::unique_ptr<AArch64Operand>
2194  CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2195  SMLoc S, SMLoc E, MCContext &Ctx) {
2196  auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2197  Op->MatrixReg.RegNum = RegNum;
2198  Op->MatrixReg.ElementWidth = ElementWidth;
2199  Op->MatrixReg.Kind = Kind;
2200  Op->StartLoc = S;
2201  Op->EndLoc = E;
2202  return Op;
2203  }
2204 
2205  static std::unique_ptr<AArch64Operand>
2206  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2207  auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2208  Op->SVCR.PStateField = PStateField;
2209  Op->SVCR.Data = Str.data();
2210  Op->SVCR.Length = Str.size();
2211  Op->StartLoc = S;
2212  Op->EndLoc = S;
2213  return Op;
2214  }
2215 
2216  static std::unique_ptr<AArch64Operand>
2217  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2218  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2219  auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2220  Op->ShiftExtend.Type = ShOp;
2221  Op->ShiftExtend.Amount = Val;
2222  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2223  Op->StartLoc = S;
2224  Op->EndLoc = E;
2225  return Op;
2226  }
2227 };
2228 
2229 } // end anonymous namespace.
2230 
2231 void AArch64Operand::print(raw_ostream &OS) const {
2232  switch (Kind) {
2233  case k_FPImm:
2234  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2235  if (!getFPImmIsExact())
2236  OS << " (inexact)";
2237  OS << ">";
2238  break;
2239  case k_Barrier: {
2240  StringRef Name = getBarrierName();
2241  if (!Name.empty())
2242  OS << "<barrier " << Name << ">";
2243  else
2244  OS << "<barrier invalid #" << getBarrier() << ">";
2245  break;
2246  }
2247  case k_Immediate:
2248  OS << *getImm();
2249  break;
2250  case k_ShiftedImm: {
2251  unsigned Shift = getShiftedImmShift();
2252  OS << "<shiftedimm ";
2253  OS << *getShiftedImmVal();
2254  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2255  break;
2256  }
2257  case k_CondCode:
2258  OS << "<condcode " << getCondCode() << ">";
2259  break;
2260  case k_VectorList: {
2261  OS << "<vectorlist ";
2262  unsigned Reg = getVectorListStart();
2263  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2264  OS << Reg + i << " ";
2265  OS << ">";
2266  break;
2267  }
2268  case k_VectorIndex:
2269  OS << "<vectorindex " << getVectorIndex() << ">";
2270  break;
2271  case k_SysReg:
2272  OS << "<sysreg: " << getSysReg() << '>';
2273  break;
2274  case k_Token:
2275  OS << "'" << getToken() << "'";
2276  break;
2277  case k_SysCR:
2278  OS << "c" << getSysCR();
2279  break;
2280  case k_Prefetch: {
2281  StringRef Name = getPrefetchName();
2282  if (!Name.empty())
2283  OS << "<prfop " << Name << ">";
2284  else
2285  OS << "<prfop invalid #" << getPrefetch() << ">";
2286  break;
2287  }
2288  case k_PSBHint:
2289  OS << getPSBHintName();
2290  break;
2291  case k_BTIHint:
2292  OS << getBTIHintName();
2293  break;
2294  case k_MatrixRegister:
2295  OS << "<matrix " << getMatrixReg() << ">";
2296  break;
2297  case k_MatrixTileList: {
2298  OS << "<matrixlist ";
2299  unsigned RegMask = getMatrixTileListRegMask();
2300  unsigned MaxBits = 8;
2301  for (unsigned I = MaxBits; I > 0; --I)
2302  OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2303  OS << '>';
2304  break;
2305  }
2306  case k_SVCR: {
2307  OS << getSVCR();
2308  break;
2309  }
2310  case k_Register:
2311  OS << "<register " << getReg() << ">";
2312  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2313  break;
2314  LLVM_FALLTHROUGH;
2315  case k_ShiftExtend:
2316  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2317  << getShiftExtendAmount();
2318  if (!hasShiftExtendAmount())
2319  OS << "<imp>";
2320  OS << '>';
2321  break;
2322  }
2323 }
2324 
2325 /// @name Auto-generated Match Functions
2326 /// {
2327 
2328 static unsigned MatchRegisterName(StringRef Name);
2329 
2330 /// }
2331 
2332 static unsigned MatchNeonVectorRegName(StringRef Name) {
2333  return StringSwitch<unsigned>(Name.lower())
2334  .Case("v0", AArch64::Q0)
2335  .Case("v1", AArch64::Q1)
2336  .Case("v2", AArch64::Q2)
2337  .Case("v3", AArch64::Q3)
2338  .Case("v4", AArch64::Q4)
2339  .Case("v5", AArch64::Q5)
2340  .Case("v6", AArch64::Q6)
2341  .Case("v7", AArch64::Q7)
2342  .Case("v8", AArch64::Q8)
2343  .Case("v9", AArch64::Q9)
2344  .Case("v10", AArch64::Q10)
2345  .Case("v11", AArch64::Q11)
2346  .Case("v12", AArch64::Q12)
2347  .Case("v13", AArch64::Q13)
2348  .Case("v14", AArch64::Q14)
2349  .Case("v15", AArch64::Q15)
2350  .Case("v16", AArch64::Q16)
2351  .Case("v17", AArch64::Q17)
2352  .Case("v18", AArch64::Q18)
2353  .Case("v19", AArch64::Q19)
2354  .Case("v20", AArch64::Q20)
2355  .Case("v21", AArch64::Q21)
2356  .Case("v22", AArch64::Q22)
2357  .Case("v23", AArch64::Q23)
2358  .Case("v24", AArch64::Q24)
2359  .Case("v25", AArch64::Q25)
2360  .Case("v26", AArch64::Q26)
2361  .Case("v27", AArch64::Q27)
2362  .Case("v28", AArch64::Q28)
2363  .Case("v29", AArch64::Q29)
2364  .Case("v30", AArch64::Q30)
2365  .Case("v31", AArch64::Q31)
2366  .Default(0);
2367 }
2368 
2369 /// Returns an optional pair of (#elements, element-width) if Suffix
2370 /// is a valid vector kind. Where the number of elements in a vector
2371 /// or the vector width is implicit or explicitly unknown (but still a
2372 /// valid suffix kind), 0 is used.
2373 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2374  RegKind VectorKind) {
2375  std::pair<int, int> Res = {-1, -1};
2376 
2377  switch (VectorKind) {
2378  case RegKind::NeonVector:
2379  Res =
2380  StringSwitch<std::pair<int, int>>(Suffix.lower())
2381  .Case("", {0, 0})
2382  .Case(".1d", {1, 64})
2383  .Case(".1q", {1, 128})
2384  // '.2h' needed for fp16 scalar pairwise reductions
2385  .Case(".2h", {2, 16})
2386  .Case(".2s", {2, 32})
2387  .Case(".2d", {2, 64})
2388  // '.4b' is another special case for the ARMv8.2a dot product
2389  // operand
2390  .Case(".4b", {4, 8})
2391  .Case(".4h", {4, 16})
2392  .Case(".4s", {4, 32})
2393  .Case(".8b", {8, 8})
2394  .Case(".8h", {8, 16})
2395  .Case(".16b", {16, 8})
2396  // Accept the width neutral ones, too, for verbose syntax. If those
2397  // aren't used in the right places, the token operand won't match so
2398  // all will work out.
2399  .Case(".b", {0, 8})
2400  .Case(".h", {0, 16})
2401  .Case(".s", {0, 32})
2402  .Case(".d", {0, 64})
2403  .Default({-1, -1});
2404  break;
2405  case RegKind::SVEPredicateVector:
2406  case RegKind::SVEDataVector:
2407  case RegKind::Matrix:
2408  Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2409  .Case("", {0, 0})
2410  .Case(".b", {0, 8})
2411  .Case(".h", {0, 16})
2412  .Case(".s", {0, 32})
2413  .Case(".d", {0, 64})
2414  .Case(".q", {0, 128})
2415  .Default({-1, -1});
2416  break;
2417  default:
2418  llvm_unreachable("Unsupported RegKind");
2419  }
2420 
2421  if (Res == std::make_pair(-1, -1))
2422  return Optional<std::pair<int, int>>();
2423 
2424  return Optional<std::pair<int, int>>(Res);
2425 }
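 
 // Illustrative note (editorial comment, not from the LLVM source): for
 // example, parseVectorKind(".4s", RegKind::NeonVector) yields {4, 32},
 // ".b" yields {0, 8} (element count implicit), and an unknown suffix
 // yields an empty Optional.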
2426 
2427 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2428  return parseVectorKind(Suffix, VectorKind).hasValue();
2429 }
2430 
2431 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2432  return StringSwitch<unsigned>(Name.lower())
2433  .Case("z0", AArch64::Z0)
2434  .Case("z1", AArch64::Z1)
2435  .Case("z2", AArch64::Z2)
2436  .Case("z3", AArch64::Z3)
2437  .Case("z4", AArch64::Z4)
2438  .Case("z5", AArch64::Z5)
2439  .Case("z6", AArch64::Z6)
2440  .Case("z7", AArch64::Z7)
2441  .Case("z8", AArch64::Z8)
2442  .Case("z9", AArch64::Z9)
2443  .Case("z10", AArch64::Z10)
2444  .Case("z11", AArch64::Z11)
2445  .Case("z12", AArch64::Z12)
2446  .Case("z13", AArch64::Z13)
2447  .Case("z14", AArch64::Z14)
2448  .Case("z15", AArch64::Z15)
2449  .Case("z16", AArch64::Z16)
2450  .Case("z17", AArch64::Z17)
2451  .Case("z18", AArch64::Z18)
2452  .Case("z19", AArch64::Z19)
2453  .Case("z20", AArch64::Z20)
2454  .Case("z21", AArch64::Z21)
2455  .Case("z22", AArch64::Z22)
2456  .Case("z23", AArch64::Z23)
2457  .Case("z24", AArch64::Z24)
2458  .Case("z25", AArch64::Z25)
2459  .Case("z26", AArch64::Z26)
2460  .Case("z27", AArch64::Z27)
2461  .Case("z28", AArch64::Z28)
2462  .Case("z29", AArch64::Z29)
2463  .Case("z30", AArch64::Z30)
2464  .Case("z31", AArch64::Z31)
2465  .Default(0);
2466 }
2467 
2468 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2469  return StringSwitch<unsigned>(Name.lower())
2470  .Case("p0", AArch64::P0)
2471  .Case("p1", AArch64::P1)
2472  .Case("p2", AArch64::P2)
2473  .Case("p3", AArch64::P3)
2474  .Case("p4", AArch64::P4)
2475  .Case("p5", AArch64::P5)
2476  .Case("p6", AArch64::P6)
2477  .Case("p7", AArch64::P7)
2478  .Case("p8", AArch64::P8)
2479  .Case("p9", AArch64::P9)
2480  .Case("p10", AArch64::P10)
2481  .Case("p11", AArch64::P11)
2482  .Case("p12", AArch64::P12)
2483  .Case("p13", AArch64::P13)
2484  .Case("p14", AArch64::P14)
2485  .Case("p15", AArch64::P15)
2486  .Default(0);
2487 }
2488 
2489 static unsigned matchMatrixTileListRegName(StringRef Name) {
2490  return StringSwitch<unsigned>(Name.lower())
2491  .Case("za0.d", AArch64::ZAD0)
2492  .Case("za1.d", AArch64::ZAD1)
2493  .Case("za2.d", AArch64::ZAD2)
2494  .Case("za3.d", AArch64::ZAD3)
2495  .Case("za4.d", AArch64::ZAD4)
2496  .Case("za5.d", AArch64::ZAD5)
2497  .Case("za6.d", AArch64::ZAD6)
2498  .Case("za7.d", AArch64::ZAD7)
2499  .Case("za0.s", AArch64::ZAS0)
2500  .Case("za1.s", AArch64::ZAS1)
2501  .Case("za2.s", AArch64::ZAS2)
2502  .Case("za3.s", AArch64::ZAS3)
2503  .Case("za0.h", AArch64::ZAH0)
2504  .Case("za1.h", AArch64::ZAH1)
2505  .Case("za0.b", AArch64::ZAB0)
2506  .Default(0);
2507 }
2508 
2509 static unsigned matchMatrixRegName(StringRef Name) {
2510  return StringSwitch<unsigned>(Name.lower())
2511  .Case("za", AArch64::ZA)
2512  .Case("za0.q", AArch64::ZAQ0)
2513  .Case("za1.q", AArch64::ZAQ1)
2514  .Case("za2.q", AArch64::ZAQ2)
2515  .Case("za3.q", AArch64::ZAQ3)
2516  .Case("za4.q", AArch64::ZAQ4)
2517  .Case("za5.q", AArch64::ZAQ5)
2518  .Case("za6.q", AArch64::ZAQ6)
2519  .Case("za7.q", AArch64::ZAQ7)
2520  .Case("za8.q", AArch64::ZAQ8)
2521  .Case("za9.q", AArch64::ZAQ9)
2522  .Case("za10.q", AArch64::ZAQ10)
2523  .Case("za11.q", AArch64::ZAQ11)
2524  .Case("za12.q", AArch64::ZAQ12)
2525  .Case("za13.q", AArch64::ZAQ13)
2526  .Case("za14.q", AArch64::ZAQ14)
2527  .Case("za15.q", AArch64::ZAQ15)
2528  .Case("za0.d", AArch64::ZAD0)
2529  .Case("za1.d", AArch64::ZAD1)
2530  .Case("za2.d", AArch64::ZAD2)
2531  .Case("za3.d", AArch64::ZAD3)
2532  .Case("za4.d", AArch64::ZAD4)
2533  .Case("za5.d", AArch64::ZAD5)
2534  .Case("za6.d", AArch64::ZAD6)
2535  .Case("za7.d", AArch64::ZAD7)
2536  .Case("za0.s", AArch64::ZAS0)
2537  .Case("za1.s", AArch64::ZAS1)
2538  .Case("za2.s", AArch64::ZAS2)
2539  .Case("za3.s", AArch64::ZAS3)
2540  .Case("za0.h", AArch64::ZAH0)
2541  .Case("za1.h", AArch64::ZAH1)
2542  .Case("za0.b", AArch64::ZAB0)
2543  .Case("za0h.q", AArch64::ZAQ0)
2544  .Case("za1h.q", AArch64::ZAQ1)
2545  .Case("za2h.q", AArch64::ZAQ2)
2546  .Case("za3h.q", AArch64::ZAQ3)
2547  .Case("za4h.q", AArch64::ZAQ4)
2548  .Case("za5h.q", AArch64::ZAQ5)
2549  .Case("za6h.q", AArch64::ZAQ6)
2550  .Case("za7h.q", AArch64::ZAQ7)
2551  .Case("za8h.q", AArch64::ZAQ8)
2552  .Case("za9h.q", AArch64::ZAQ9)
2553  .Case("za10h.q", AArch64::ZAQ10)
2554  .Case("za11h.q", AArch64::ZAQ11)
2555  .Case("za12h.q", AArch64::ZAQ12)
2556  .Case("za13h.q", AArch64::ZAQ13)
2557  .Case("za14h.q", AArch64::ZAQ14)
2558  .Case("za15h.q", AArch64::ZAQ15)
2559  .Case("za0h.d", AArch64::ZAD0)
2560  .Case("za1h.d", AArch64::ZAD1)
2561  .Case("za2h.d", AArch64::ZAD2)
2562  .Case("za3h.d", AArch64::ZAD3)
2563  .Case("za4h.d", AArch64::ZAD4)
2564  .Case("za5h.d", AArch64::ZAD5)
2565  .Case("za6h.d", AArch64::ZAD6)
2566  .Case("za7h.d", AArch64::ZAD7)
2567  .Case("za0h.s", AArch64::ZAS0)
2568  .Case("za1h.s", AArch64::ZAS1)
2569  .Case("za2h.s", AArch64::ZAS2)
2570  .Case("za3h.s", AArch64::ZAS3)
2571  .Case("za0h.h", AArch64::ZAH0)
2572  .Case("za1h.h", AArch64::ZAH1)
2573  .Case("za0h.b", AArch64::ZAB0)
2574  .Case("za0v.q", AArch64::ZAQ0)
2575  .Case("za1v.q", AArch64::ZAQ1)
2576  .Case("za2v.q", AArch64::ZAQ2)
2577  .Case("za3v.q", AArch64::ZAQ3)
2578  .Case("za4v.q", AArch64::ZAQ4)
2579  .Case("za5v.q", AArch64::ZAQ5)
2580  .Case("za6v.q", AArch64::ZAQ6)
2581  .Case("za7v.q", AArch64::ZAQ7)
2582  .Case("za8v.q", AArch64::ZAQ8)
2583  .Case("za9v.q", AArch64::ZAQ9)
2584  .Case("za10v.q", AArch64::ZAQ10)
2585  .Case("za11v.q", AArch64::ZAQ11)
2586  .Case("za12v.q", AArch64::ZAQ12)
2587  .Case("za13v.q", AArch64::ZAQ13)
2588  .Case("za14v.q", AArch64::ZAQ14)
2589  .Case("za15v.q", AArch64::ZAQ15)
2590  .Case("za0v.d", AArch64::ZAD0)
2591  .Case("za1v.d", AArch64::ZAD1)
2592  .Case("za2v.d", AArch64::ZAD2)
2593  .Case("za3v.d", AArch64::ZAD3)
2594  .Case("za4v.d", AArch64::ZAD4)
2595  .Case("za5v.d", AArch64::ZAD5)
2596  .Case("za6v.d", AArch64::ZAD6)
2597  .Case("za7v.d", AArch64::ZAD7)
2598  .Case("za0v.s", AArch64::ZAS0)
2599  .Case("za1v.s", AArch64::ZAS1)
2600  .Case("za2v.s", AArch64::ZAS2)
2601  .Case("za3v.s", AArch64::ZAS3)
2602  .Case("za0v.h", AArch64::ZAH0)
2603  .Case("za1v.h", AArch64::ZAH1)
2604  .Case("za0v.b", AArch64::ZAB0)
2605  .Default(0);
2606 }
2607 
2608 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2609  SMLoc &EndLoc) {
2610  return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2611 }
2612 
2613 OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2614  SMLoc &StartLoc,
2615  SMLoc &EndLoc) {
2616  StartLoc = getLoc();
2617  auto Res = tryParseScalarRegister(RegNo);
2618  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2619  return Res;
2620 }
2621 
2622 // Matches a register name or register alias previously defined by '.req'
2623 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2624  RegKind Kind) {
2625  unsigned RegNum = 0;
2626  if ((RegNum = matchSVEDataVectorRegName(Name)))
2627  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2628 
2629  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2630  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2631 
2632  if ((RegNum = MatchNeonVectorRegName(Name)))
2633  return Kind == RegKind::NeonVector ? RegNum : 0;
2634 
2635  if ((RegNum = matchMatrixRegName(Name)))
2636  return Kind == RegKind::Matrix ? RegNum : 0;
2637 
2638  // The parsed register must be of RegKind Scalar
2639  if ((RegNum = MatchRegisterName(Name)))
2640  return Kind == RegKind::Scalar ? RegNum : 0;
2641 
2642  if (!RegNum) {
2643  // Handle a few common aliases of registers.
2644  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2645  .Case("fp", AArch64::FP)
2646  .Case("lr", AArch64::LR)
2647  .Case("x31", AArch64::XZR)
2648  .Case("w31", AArch64::WZR)
2649  .Default(0))
2650  return Kind == RegKind::Scalar ? RegNum : 0;
2651 
2652  // Check for aliases registered via .req. Canonicalize to lower case.
2653  // That's more consistent since register names are case insensitive, and
2654  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2655  auto Entry = RegisterReqs.find(Name.lower());
2656  if (Entry == RegisterReqs.end())
2657  return 0;
2658 
2659  // set RegNum if the match is the right kind of register
2660  if (Kind == Entry->getValue().first)
2661  RegNum = Entry->getValue().second;
2662  }
2663  return RegNum;
2664 }
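 
 // Illustrative note (editorial comment, not from the LLVM source): after a
 // directive such as "foo .req x5", the name "foo" resolves through
 // RegisterReqs to X5, but only when the caller asks for a Scalar register;
 // a kind mismatch yields 0.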
2665 
2666 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2667 /// Identifier when called, and if it is a register name the token is eaten and
 2668 /// the matched register number is returned via RegNum.
 2669 OperandMatchResultTy
 2670 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2671  const AsmToken &Tok = getTok();
2672  if (Tok.isNot(AsmToken::Identifier))
2673  return MatchOperand_NoMatch;
2674 
2675  std::string lowerCase = Tok.getString().lower();
2676  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2677  if (Reg == 0)
2678  return MatchOperand_NoMatch;
2679 
2680  RegNum = Reg;
2681  Lex(); // Eat identifier token.
2682  return MatchOperand_Success;
2683 }
2684 
2685 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
 2686 OperandMatchResultTy
 2687 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2688  SMLoc S = getLoc();
2689 
2690  if (getTok().isNot(AsmToken::Identifier)) {
2691  Error(S, "Expected cN operand where 0 <= N <= 15");
2692  return MatchOperand_ParseFail;
2693  }
2694 
2695  StringRef Tok = getTok().getIdentifier();
2696  if (Tok[0] != 'c' && Tok[0] != 'C') {
2697  Error(S, "Expected cN operand where 0 <= N <= 15");
2698  return MatchOperand_ParseFail;
2699  }
2700 
2701  uint32_t CRNum;
2702  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2703  if (BadNum || CRNum > 15) {
2704  Error(S, "Expected cN operand where 0 <= N <= 15");
2705  return MatchOperand_ParseFail;
2706  }
2707 
2708  Lex(); // Eat identifier token.
2709  Operands.push_back(
2710  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2711  return MatchOperand_Success;
2712 }
2713 
2714 /// tryParsePrefetch - Try to parse a prefetch operand.
2715 template <bool IsSVEPrefetch>
 2716 OperandMatchResultTy
 2717 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2718  SMLoc S = getLoc();
2719  const AsmToken &Tok = getTok();
2720 
2721  auto LookupByName = [](StringRef N) {
2722  if (IsSVEPrefetch) {
2723  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2724  return Optional<unsigned>(Res->Encoding);
2725  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2726  return Optional<unsigned>(Res->Encoding);
2727  return Optional<unsigned>();
2728  };
2729 
2730  auto LookupByEncoding = [](unsigned E) {
2731  if (IsSVEPrefetch) {
2732  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2733  return Optional<StringRef>(Res->Name);
2734  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2735  return Optional<StringRef>(Res->Name);
2736  return Optional<StringRef>();
2737  };
2738  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2739 
2740  // Either an identifier for named values or a 5-bit immediate.
2741  // Eat optional hash.
2742  if (parseOptionalToken(AsmToken::Hash) ||
2743  Tok.is(AsmToken::Integer)) {
2744  const MCExpr *ImmVal;
2745  if (getParser().parseExpression(ImmVal))
2746  return MatchOperand_ParseFail;
2747 
2748  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2749  if (!MCE) {
2750  TokError("immediate value expected for prefetch operand");
2751  return MatchOperand_ParseFail;
2752  }
2753  unsigned prfop = MCE->getValue();
2754  if (prfop > MaxVal) {
2755  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2756  "] expected");
2757  return MatchOperand_ParseFail;
2758  }
2759 
2760  auto PRFM = LookupByEncoding(MCE->getValue());
2761  Operands.push_back(AArch64Operand::CreatePrefetch(
2762  prfop, PRFM.getValueOr(""), S, getContext()));
2763  return MatchOperand_Success;
2764  }
2765 
2766  if (Tok.isNot(AsmToken::Identifier)) {
2767  TokError("prefetch hint expected");
2768  return MatchOperand_ParseFail;
2769  }
2770 
2771  auto PRFM = LookupByName(Tok.getString());
2772  if (!PRFM) {
2773  TokError("prefetch hint expected");
2774  return MatchOperand_ParseFail;
2775  }
2776 
2777  Operands.push_back(AArch64Operand::CreatePrefetch(
2778  *PRFM, Tok.getString(), S, getContext()));
2779  Lex(); // Eat identifier token.
2780  return MatchOperand_Success;
2781 }
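 
 // Illustrative note (editorial comment, not from the LLVM source): both
 // spellings reach the same operand, e.g. "prfm pldl1keep, [x0]" by name
 // and "prfm #0, [x0]" by immediate (PLDL1KEEP has encoding 0); SVE
 // prefetches cap the immediate at 15 rather than 31.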
2782 
2783 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
 2784 OperandMatchResultTy
 2785 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2786  SMLoc S = getLoc();
2787  const AsmToken &Tok = getTok();
2788  if (Tok.isNot(AsmToken::Identifier)) {
2789  TokError("invalid operand for instruction");
2790  return MatchOperand_ParseFail;
2791  }
2792 
2793  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2794  if (!PSB) {
2795  TokError("invalid operand for instruction");
2796  return MatchOperand_ParseFail;
2797  }
2798 
2799  Operands.push_back(AArch64Operand::CreatePSBHint(
2800  PSB->Encoding, Tok.getString(), S, getContext()));
2801  Lex(); // Eat identifier token.
2802  return MatchOperand_Success;
2803 }
2804 
2805 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
 2806 OperandMatchResultTy
 2807 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2808  SMLoc S = getLoc();
2809  const AsmToken &Tok = getTok();
2810  if (Tok.isNot(AsmToken::Identifier)) {
2811  TokError("invalid operand for instruction");
2812  return MatchOperand_ParseFail;
2813  }
2814 
2815  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2816  if (!BTI) {
2817  TokError("invalid operand for instruction");
2818  return MatchOperand_ParseFail;
2819  }
2820 
2821  Operands.push_back(AArch64Operand::CreateBTIHint(
2822  BTI->Encoding, Tok.getString(), S, getContext()));
2823  Lex(); // Eat identifier token.
2824  return MatchOperand_Success;
2825 }
2826 
2827 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2828 /// instruction.
 2829 OperandMatchResultTy
 2830 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2831  SMLoc S = getLoc();
2832  const MCExpr *Expr = nullptr;
2833 
2834  if (getTok().is(AsmToken::Hash)) {
2835  Lex(); // Eat hash token.
2836  }
2837 
2838  if (parseSymbolicImmVal(Expr))
2839  return MatchOperand_ParseFail;
2840 
2841  AArch64MCExpr::VariantKind ELFRefKind;
2842  MCSymbolRefExpr::VariantKind DarwinRefKind;
2843  int64_t Addend;
2844  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2845  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2846  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2847  // No modifier was specified at all; this is the syntax for an ELF basic
2848  // ADRP relocation (unfortunately).
 2849  Expr =
 2850  AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
 2851  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2852  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2853  Addend != 0) {
2854  Error(S, "gotpage label reference not allowed an addend");
2855  return MatchOperand_ParseFail;
2856  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2857  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2858  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2859  ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2860  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2861  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
2862  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2863  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2864  // The operand must be an @page or @gotpage qualified symbolref.
2865  Error(S, "page or gotpage label reference expected");
2866  return MatchOperand_ParseFail;
2867  }
2868  }
2869 
2870  // We have either a label reference possibly with addend or an immediate. The
2871  // addend is a raw value here. The linker will adjust it to only reference the
2872  // page.
2873  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2874  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2875 
2876  return MatchOperand_Success;
2877 }
2878 
2879 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2880 /// instruction.
 2881 OperandMatchResultTy
 2882 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2883  SMLoc S = getLoc();
2884  const MCExpr *Expr = nullptr;
2885 
2886  // Leave anything with a bracket to the default for SVE
2887  if (getTok().is(AsmToken::LBrac))
2888  return MatchOperand_NoMatch;
2889 
2890  if (getTok().is(AsmToken::Hash))
2891  Lex(); // Eat hash token.
2892 
2893  if (parseSymbolicImmVal(Expr))
2894  return MatchOperand_ParseFail;
2895 
2896  AArch64MCExpr::VariantKind ELFRefKind;
2897  MCSymbolRefExpr::VariantKind DarwinRefKind;
2898  int64_t Addend;
2899  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2900  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2901  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2902  // No modifier was specified at all; this is the syntax for an ELF basic
2903  // ADR relocation (unfortunately).
2904  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2905  } else {
2906  Error(S, "unexpected adr label");
2907  return MatchOperand_ParseFail;
2908  }
2909  }
2910 
2911  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2912  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2913  return MatchOperand_Success;
2914 }
2915 
2916 /// tryParseFPImm - A floating point immediate expression operand.
2917 template<bool AddFPZeroAsLiteral>
 2918 OperandMatchResultTy
 2919 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2920  SMLoc S = getLoc();
2921 
2922  bool Hash = parseOptionalToken(AsmToken::Hash);
2923 
2924  // Handle negation, as that still comes through as a separate token.
2925  bool isNegative = parseOptionalToken(AsmToken::Minus);
2926 
2927  const AsmToken &Tok = getTok();
2928  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2929  if (!Hash)
2930  return MatchOperand_NoMatch;
2931  TokError("invalid floating point immediate");
2932  return MatchOperand_ParseFail;
2933  }
2934 
2935  // Parse hexadecimal representation.
2936  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2937  if (Tok.getIntVal() > 255 || isNegative) {
2938  TokError("encoded floating point value out of range");
2939  return MatchOperand_ParseFail;
2940  }
2941 
2942  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2943  Operands.push_back(
2944  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2945  } else {
2946  // Parse FP representation.
2947  APFloat RealVal(APFloat::IEEEdouble());
2948  auto StatusOrErr =
2949  RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2950  if (errorToBool(StatusOrErr.takeError())) {
2951  TokError("invalid floating point representation");
2952  return MatchOperand_ParseFail;
2953  }
2954 
2955  if (isNegative)
2956  RealVal.changeSign();
2957 
2958  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2959  Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
2960  Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
2961  } else
2962  Operands.push_back(AArch64Operand::CreateFPImm(
2963  RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2964  }
2965 
2966  Lex(); // Eat the token.
2967 
2968  return MatchOperand_Success;
2969 }
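 
 // Illustrative note (editorial comment, not from the LLVM source):
 // "fmov d0, #1.0" takes the decimal path below, while "fmov d0, #0x70"
 // supplies the 8-bit FMOV encoding directly (0x70 is the imm8 encoding
 // of 1.0).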
2970 
2971 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2972 /// a shift suffix, for example '#1, lsl #12'.
 2973 OperandMatchResultTy
 2974 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2975  SMLoc S = getLoc();
2976 
2977  if (getTok().is(AsmToken::Hash))
2978  Lex(); // Eat '#'
2979  else if (getTok().isNot(AsmToken::Integer))
 2980  // The operand should start with '#' or be an integer; otherwise it's not a match.
2981  return MatchOperand_NoMatch;
2982 
2983  const MCExpr *Imm = nullptr;
2984  if (parseSymbolicImmVal(Imm))
2985  return MatchOperand_ParseFail;
2986  else if (getTok().isNot(AsmToken::Comma)) {
2987  Operands.push_back(
2988  AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
2989  return MatchOperand_Success;
2990  }
2991 
2992  // Eat ','
2993  Lex();
2994 
2995  // The optional operand must be "lsl #N" where N is non-negative.
2996  if (!getTok().is(AsmToken::Identifier) ||
2997  !getTok().getIdentifier().equals_insensitive("lsl")) {
2998  Error(getLoc(), "only 'lsl #+N' valid after immediate");
2999  return MatchOperand_ParseFail;
3000  }
3001 
3002  // Eat 'lsl'
3003  Lex();
3004 
3005  parseOptionalToken(AsmToken::Hash);
3006 
3007  if (getTok().isNot(AsmToken::Integer)) {
3008  Error(getLoc(), "only 'lsl #+N' valid after immediate");
3009  return MatchOperand_ParseFail;
3010  }
3011 
3012  int64_t ShiftAmount = getTok().getIntVal();
3013 
3014  if (ShiftAmount < 0) {
3015  Error(getLoc(), "positive shift amount required");
3016  return MatchOperand_ParseFail;
3017  }
3018  Lex(); // Eat the number
3019 
3020  // Just in case the optional lsl #0 is used for immediates other than zero.
3021  if (ShiftAmount == 0 && Imm != nullptr) {
3022  Operands.push_back(
3023  AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3024  return MatchOperand_Success;
3025  }
3026 
3027  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3028  getLoc(), getContext()));
3029  return MatchOperand_Success;
3030 }
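 
 // Illustrative note (editorial comment, not from the LLVM source): this
 // accepts forms such as "add x0, x1, #1" and "add x0, x1, #1, lsl #12";
 // "#1, lsl #0" folds back to a plain immediate, per the special case
 // above.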
3031 
3032 /// parseCondCodeString - Parse a Condition Code string.
3033 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
3035  .Case("eq", AArch64CC::EQ)
3036  .Case("ne", AArch64CC::NE)
3037  .Case("cs", AArch64CC::HS)
3038  .Case("hs", AArch64CC::HS)
3039  .Case("cc", AArch64CC::LO)
3040  .Case("lo", AArch64CC::LO)
3041  .Case("mi", AArch64CC::MI)
3042  .Case("pl", AArch64CC::PL)
3043  .Case("vs", AArch64CC::VS)
3044  .Case("vc", AArch64CC::VC)
3045  .Case("hi", AArch64CC::HI)
3046  .Case("ls", AArch64CC::LS)
3047  .Case("ge", AArch64CC::GE)
3048  .Case("lt", AArch64CC::LT)
3049  .Case("gt", AArch64CC::GT)
3050  .Case("le", AArch64CC::LE)
3051  .Case("al", AArch64CC::AL)
3052  .Case("nv", AArch64CC::NV)
 3053  .Default(AArch64CC::Invalid);
 3054 
3055  if (CC == AArch64CC::Invalid &&
3056  getSTI().getFeatureBits()[AArch64::FeatureSVE])
3058  .Case("none", AArch64CC::EQ)
3059  .Case("any", AArch64CC::NE)
3060  .Case("nlast", AArch64CC::HS)
3061  .Case("last", AArch64CC::LO)
3062  .Case("first", AArch64CC::MI)
3063  .Case("nfrst", AArch64CC::PL)
3064  .Case("pmore", AArch64CC::HI)
3065  .Case("plast", AArch64CC::LS)
3066  .Case("tcont", AArch64CC::GE)
3067  .Case("tstop", AArch64CC::LT)
 3068  .Default(AArch64CC::Invalid);
 3069 
3070  return CC;
3071 }
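 
 // Illustrative note (editorial comment, not from the LLVM source): the SVE
 // names alias the NZCV condition codes as produced by predicate-setting
 // instructions, so "b.none" assembles to the same encoding as "b.eq" and
 // "b.any" to the same encoding as "b.ne".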
3072 
3073 /// parseCondCode - Parse a Condition Code operand.
3074 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3075  bool invertCondCode) {
3076  SMLoc S = getLoc();
3077  const AsmToken &Tok = getTok();
3078  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3079 
3080  StringRef Cond = Tok.getString();
3081  AArch64CC::CondCode CC = parseCondCodeString(Cond);
3082  if (CC == AArch64CC::Invalid)
3083  return TokError("invalid condition code");
3084  Lex(); // Eat identifier token.
3085 
3086  if (invertCondCode) {
3087  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3088  return TokError("condition codes AL and NV are invalid for this instruction");
 3089  CC = AArch64CC::getInvertedCondCode(CC);
 3090  }
3091 
3092  Operands.push_back(
3093  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3094  return false;
3095 }
3096 
 3097 OperandMatchResultTy
 3098 AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3099  const AsmToken &Tok = getTok();
3100  SMLoc S = getLoc();
3101 
3102  if (Tok.isNot(AsmToken::Identifier)) {
3103  TokError("invalid operand for instruction");
3104  return MatchOperand_ParseFail;
3105  }
3106 
3107  unsigned PStateImm = -1;
3108  const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3109  if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3110  PStateImm = SVCR->Encoding;
3111 
3112  Operands.push_back(
3113  AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3114  Lex(); // Eat identifier token.
3115  return MatchOperand_Success;
3116 }
3117 
 3118 OperandMatchResultTy
 3119 AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3120  const AsmToken &Tok = getTok();
3121  SMLoc S = getLoc();
3122 
3123  StringRef Name = Tok.getString();
3124 
3125  if (Name.equals_insensitive("za")) {
3126  Lex(); // eat "za"
3127  Operands.push_back(AArch64Operand::CreateMatrixRegister(
3128  AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
3129  getContext()));
3130  if (getLexer().is(AsmToken::LBrac)) {
3131  // There's no comma after matrix operand, so we can parse the next operand
3132  // immediately.
3133  if (parseOperand(Operands, false, false))
3134  return MatchOperand_NoMatch;
3135  }
3136  return MatchOperand_Success;
3137  }
3138 
3139  // Try to parse matrix register.
3140  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3141  if (!Reg)
3142  return MatchOperand_NoMatch;
3143 
3144  size_t DotPosition = Name.find('.');
3145  assert(DotPosition != StringRef::npos && "Unexpected register");
3146 
3147  StringRef Head = Name.take_front(DotPosition);
3148  StringRef Tail = Name.drop_front(DotPosition);
3149  StringRef RowOrColumn = Head.take_back();
3150 
3151  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
3152  .Case("h", MatrixKind::Row)
3153  .Case("v", MatrixKind::Col)
3154  .Default(MatrixKind::Tile);
3155 
3156  // Next up, parsing the suffix
3157  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3158  if (!KindRes) {
3159  TokError("Expected the register to be followed by element width suffix");
3160  return MatchOperand_ParseFail;
3161  }
3162  unsigned ElementWidth = KindRes->second;
3163 
3164  Lex();
3165 
3166  Operands.push_back(AArch64Operand::CreateMatrixRegister(
3167  Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3168 
3169  if (getLexer().is(AsmToken::LBrac)) {
3170  // There's no comma after matrix operand, so we can parse the next operand
3171  // immediately.
3172  if (parseOperand(Operands, false, false))
3173  return MatchOperand_NoMatch;
3174  }
3175  return MatchOperand_Success;
3176 }
3177 
 3178 /// tryParseOptionalShiftExtend - Some operands take an optional shift argument. Parse
3179 /// them if present.
 3180 OperandMatchResultTy
 3181 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3182  const AsmToken &Tok = getTok();
3183  std::string LowerID = Tok.getString().lower();
3186  .Case("lsl", AArch64_AM::LSL)
3187  .Case("lsr", AArch64_AM::LSR)
3188  .Case("asr", AArch64_AM::ASR)
3189  .Case("ror", AArch64_AM::ROR)
3190  .Case("msl", AArch64_AM::MSL)
3191  .Case("uxtb", AArch64_AM::UXTB)
3192  .Case("uxth", AArch64_AM::UXTH)
3193  .Case("uxtw", AArch64_AM::UXTW)
3194  .Case("uxtx", AArch64_AM::UXTX)
3195  .Case("sxtb", AArch64_AM::SXTB)
3196  .Case("sxth", AArch64_AM::SXTH)
3197  .Case("sxtw", AArch64_AM::SXTW)
3198  .Case("sxtx", AArch64_AM::SXTX)
 3199  .Default(AArch64_AM::InvalidShiftExtend);
 3200 
3201  if (ShOp == AArch64_AM::InvalidShiftExtend)
3202  return MatchOperand_NoMatch;
3203 
3204  SMLoc S = Tok.getLoc();
3205  Lex();
3206 
3207  bool Hash = parseOptionalToken(AsmToken::Hash);
3208 
3209  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3210  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3211  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3212  ShOp == AArch64_AM::MSL) {
3213  // We expect a number here.
3214  TokError("expected #imm after shift specifier");
3215  return MatchOperand_ParseFail;
3216  }
3217 
3218  // "extend" type operations don't need an immediate, #0 is implicit.
3219  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3220  Operands.push_back(
3221  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3222  return MatchOperand_Success;
3223  }
3224 
3225  // Make sure we do actually have a number, identifier or a parenthesized
3226  // expression.
3227  SMLoc E = getLoc();
3228  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3229  !getTok().is(AsmToken::Identifier)) {
3230  Error(E, "expected integer shift amount");
3231  return MatchOperand_ParseFail;
3232  }
3233 
3234  const MCExpr *ImmVal;
3235  if (getParser().parseExpression(ImmVal))
3236  return MatchOperand_ParseFail;
3237 
3238  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3239  if (!MCE) {
3240  Error(E, "expected constant '#imm' after shift specifier");
3241  return MatchOperand_ParseFail;
3242  }
3243 
3244  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3245  Operands.push_back(AArch64Operand::CreateShiftExtend(
3246  ShOp, MCE->getValue(), true, S, E, getContext()));
3247  return MatchOperand_Success;
3248 }
3249 
3250 static const struct Extension {
3251  const char *Name;
 3252  const FeatureBitset Features;
 3253 } ExtensionMap[] = {
3254  {"crc", {AArch64::FeatureCRC}},
3255  {"sm4", {AArch64::FeatureSM4}},
3256  {"sha3", {AArch64::FeatureSHA3}},
3257  {"sha2", {AArch64::FeatureSHA2}},
3258  {"aes", {AArch64::FeatureAES}},
3259  {"crypto", {AArch64::FeatureCrypto}},
3260  {"fp", {AArch64::FeatureFPARMv8}},
3261  {"simd", {AArch64::FeatureNEON}},
3262  {"ras", {AArch64::FeatureRAS}},
3263  {"lse", {AArch64::FeatureLSE}},
3264  {"predres", {AArch64::FeaturePredRes}},
3265  {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3266  {"mte", {AArch64::FeatureMTE}},
3267  {"memtag", {AArch64::FeatureMTE}},
3268  {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3269  {"pan", {AArch64::FeaturePAN}},
3270  {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3271  {"ccpp", {AArch64::FeatureCCPP}},
3272  {"rcpc", {AArch64::FeatureRCPC}},
3273  {"rng", {AArch64::FeatureRandGen}},
3274  {"sve", {AArch64::FeatureSVE}},
3275  {"sve2", {AArch64::FeatureSVE2}},
3276  {"sve2-aes", {AArch64::FeatureSVE2AES}},
3277  {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3278  {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3279  {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3280  {"ls64", {AArch64::FeatureLS64}},
3281  {"xs", {AArch64::FeatureXS}},
3282  {"pauth", {AArch64::FeaturePAuth}},
3283  {"flagm", {AArch64::FeatureFlagM}},
3284  {"rme", {AArch64::FeatureRME}},
3285  {"sme", {AArch64::FeatureSME}},
3286  {"sme-f64", {AArch64::FeatureSMEF64}},
3287  {"sme-i64", {AArch64::FeatureSMEI64}},
3288  {"hbc", {AArch64::FeatureHBC}},
3289  {"mops", {AArch64::FeatureMOPS}},
3290  // FIXME: Unsupported extensions
3291  {"lor", {}},
3292  {"rdma", {}},
3293  {"profile", {}},
3294 };
3295 
3296 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3297  if (FBS[AArch64::HasV8_0aOps])
3298  Str += "ARMv8a";
3299  if (FBS[AArch64::HasV8_1aOps])
3300  Str += "ARMv8.1a";
3301  else if (FBS[AArch64::HasV8_2aOps])
3302  Str += "ARMv8.2a";
3303  else if (FBS[AArch64::HasV8_3aOps])
3304  Str += "ARMv8.3a";
3305  else if (FBS[AArch64::HasV8_4aOps])
3306  Str += "ARMv8.4a";
3307  else if (FBS[AArch64::HasV8_5aOps])
3308  Str += "ARMv8.5a";
3309  else if (FBS[AArch64::HasV8_6aOps])
3310  Str += "ARMv8.6a";
3311  else if (FBS[AArch64::HasV8_7aOps])
3312  Str += "ARMv8.7a";
3313  else if (FBS[AArch64::HasV8_8aOps])
3314  Str += "ARMv8.8a";
3315  else if (FBS[AArch64::HasV9_0aOps])
3316  Str += "ARMv9-a";
3317  else if (FBS[AArch64::HasV9_1aOps])
3318  Str += "ARMv9.1a";
3319  else if (FBS[AArch64::HasV9_2aOps])
3320  Str += "ARMv9.2a";
3321  else if (FBS[AArch64::HasV9_3aOps])
3322  Str += "ARMv9.3a";
3323  else if (FBS[AArch64::HasV8_0rOps])
3324  Str += "ARMv8r";
3325  else {
3326  SmallVector<std::string, 2> ExtMatches;
3327  for (const auto& Ext : ExtensionMap) {
3328  // Use & in case multiple features are enabled
3329  if ((FBS & Ext.Features) != FeatureBitset())
3330  ExtMatches.push_back(Ext.Name);
3331  }
3332  Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3333  }
3334 }
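 
 // Illustrative note (editorial comment, not from the LLVM source): this
 // builds the tail of diagnostics of the form "DC CVAP requires: ccpp",
 // naming either the base architecture or the matching extension(s) from
 // ExtensionMap.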
3335 
3336 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3337  SMLoc S) {
3338  const uint16_t Op2 = Encoding & 7;
3339  const uint16_t Cm = (Encoding & 0x78) >> 3;
3340  const uint16_t Cn = (Encoding & 0x780) >> 7;
3341  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3342 
3343  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3344 
3345  Operands.push_back(
3346  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3347  Operands.push_back(
3348  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3349  Operands.push_back(
3350  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3351  Expr = MCConstantExpr::create(Op2, getContext());
3352  Operands.push_back(
3353  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3354 }
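 
 // Illustrative note (editorial comment, not from the LLVM source): the
 // encoding packs op1:CRn:CRm:op2 into 14 bits as bits [13:11], [10:7],
 // [6:3] and [2:0]. A hypothetical encoding of 0x43a, for instance,
 // unpacks to op1 = 0, CRn = 8, CRm = 7, op2 = 2.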
3355 
3356 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3357 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3358 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
 3359  OperandVector &Operands) {
 3360  if (Name.contains('.'))
3361  return TokError("invalid operand");
3362 
3363  Mnemonic = Name;
3364  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3365 
3366  const AsmToken &Tok = getTok();
3367  StringRef Op = Tok.getString();
3368  SMLoc S = Tok.getLoc();
3369 
3370  if (Mnemonic == "ic") {
3371  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3372  if (!IC)
3373  return TokError("invalid operand for IC instruction");
3374  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3375  std::string Str("IC " + std::string(IC->Name) + " requires: ");
 3376  setRequiredFeatureString(IC->getRequiredFeatures(), Str);
 3377  return TokError(Str);
3378  }
3379  createSysAlias(IC->Encoding, Operands, S);
3380  } else if (Mnemonic == "dc") {
3381  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3382  if (!DC)
3383  return TokError("invalid operand for DC instruction");
3384  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3385  std::string Str("DC " + std::string(DC->Name) + " requires: ");
3386  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3387  return TokError(Str);
3388  }
3389  createSysAlias(DC->Encoding, Operands, S);
3390  } else if (Mnemonic == "at") {
3391  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3392  if (!AT)
3393  return TokError("invalid operand for AT instruction");
3394  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3395  std::string Str("AT " + std::string(AT->Name) + " requires: ");
 3396  setRequiredFeatureString(AT->getRequiredFeatures(), Str);
 3397  return TokError(Str);
3398  }
3399  createSysAlias(AT->Encoding, Operands, S);
3400  } else if (Mnemonic == "tlbi") {
3401  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3402  if (!TLBI)
3403  return TokError("invalid operand for TLBI instruction");
3404  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3405  std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
 3406  setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
 3407  return TokError(Str);
3408  }
3409  createSysAlias(TLBI->Encoding, Operands, S);
3410  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
3411  const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
3412  if (!PRCTX)
3413  return TokError("invalid operand for prediction restriction instruction");
3414  else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
3415  std::string Str(
3416  Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
3417  setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
3418  return TokError(Str);
3419  }
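  // The PRCTX lookup only yields the op1:Cn:Cm part of the encoding; the op2
  // value selecting the particular restriction (CFP=4, DVP=5, CPP=7) is
  // computed here and appended in the low three bits.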
3420  uint16_t PRCTX_Op2 =
3421  Mnemonic == "cfp" ? 4 :
3422  Mnemonic == "dvp" ? 5 :
3423  Mnemonic == "cpp" ? 7 :
3424  0;
3425  assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
3426  createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2, Operands, S);
3427  }
3428 
3429  Lex(); // Eat operand.
3430 
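  // Ops whose names contain "all" (e.g. "ic ialluis", "tlbi vmalle1") operate
  // on everything and take no register; all other ops require one.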
3431  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3432  bool HasRegister = false;
3433 
3434  // Check for the optional register operand.
3435  if (parseOptionalToken(AsmToken::Comma)) {
3436  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3437  return TokError("expected register operand");
3438  HasRegister = true;
3439  }
3440 
3441  if (ExpectRegister && !HasRegister)
3442  return TokError("specified " + Mnemonic + " op requires a register");
3443  else if (!ExpectRegister && HasRegister)
3444  return TokError("specified " + Mnemonic + " op does not use a register");
3445 
3446  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3447  return true;
3448 
3449  return false;
3450 }
3451 
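/// tryParseBarrierOperand - Parse the barrier operand of DMB/DSB/ISB/TSB:
/// either a recognised named option or an immediate in the range [0, 15].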
3452 OperandMatchResultTy
3453 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3454  MCAsmParser &Parser = getParser();
3455  const AsmToken &Tok = getTok();
3456 
3457  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3458  TokError("'csync' operand expected");
3459  return MatchOperand_ParseFail;
3460  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3461  // Immediate operand.
3462  const MCExpr *ImmVal;
3463  SMLoc ExprLoc = getLoc();
3464  AsmToken IntTok = Tok;
3465  if (getParser().parseExpression(ImmVal))
3466  return MatchOperand_ParseFail;
3467  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3468  if (!MCE) {
3469  Error(ExprLoc, "immediate value expected for barrier operand");
3470  return MatchOperand_ParseFail;
3471  }
3472  int64_t Value = MCE->getValue();
3473  if (Mnemonic == "dsb" && Value > 15) {
3474  // This case is a no match here, but it might be matched by the nXS
3475  // variant. We deliberately do not unlex the optional '#', as it is not
3476  // needed to characterize an integer immediate.
3477  Parser.getLexer().UnLex(IntTok);
3478  return MatchOperand_NoMatch;
3479  }
3480  if (Value < 0 || Value > 15) {
3481  Error(ExprLoc, "barrier operand out of range");
3482  return MatchOperand_ParseFail;
3483  }
3484  auto DB = AArch64DB::lookupDBByEncoding(Value);
3485  Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3486  ExprLoc, getContext(),
3487  false /*hasnXSModifier*/));
3488  return MatchOperand_Success;
3489  }
3490 
3491  if (Tok.isNot(AsmToken::Identifier)) {
3492  TokError("invalid operand for instruction");
3493  return MatchOperand_ParseFail;
3494  }
3495 
3496  StringRef Operand = Tok.getString();
3497  auto TSB = AArch64TSB::lookupTSBByName(Operand);
3498  auto DB = AArch64DB::lookupDBByName(Operand);
3499  // The only valid named option for ISB is 'sy'
3500  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3501  TokError("'sy' or #imm operand expected");
3502  return MatchOperand_ParseFail;
3503  // The only valid named option for TSB is 'csync'
3504  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3505  TokError("'csync' operand expected");
3506  return MatchOperand_ParseFail;
3507  } else if (!DB && !TSB) {
3508  if (Mnemonic == "dsb") {
3509  // This case is a no match here, but it might be matched by the nXS
3510  // variant.
3511  return MatchOperand_NoMatch;
3512  }
3513  TokError("invalid barrier option name");
3514  return MatchOperand_ParseFail;
3515  }
3516 
3517  Operands.push_back(AArch64Operand::CreateBarrier(
3518  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3519  getContext(), false /*hasnXSModifier*/));
3520  Lex(); // Consume the option
3521 
3522  return MatchOperand_Success;
3523 }
3524 
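/// tryParseBarriernXSOperand - Parse the operand of the Armv8.7-A "dsb" nXS
/// variant: a named nXS barrier option or one of the immediates 16, 20, 24
/// or 28.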
3525 OperandMatchResultTy
3526 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3527  const AsmToken &Tok = getTok();
3528 
3529  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3530  if (Mnemonic != "dsb")
3531  return MatchOperand_ParseFail;
3532 
3533  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3534  // Immediate operand.
3535  const MCExpr *ImmVal;
3536  SMLoc ExprLoc = getLoc();
3537  if (getParser().parseExpression(ImmVal))
3538  return MatchOperand_ParseFail;
3539  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3540  if (!MCE) {
3541  Error(ExprLoc, "immediate value expected for barrier operand");
3542  return MatchOperand_ParseFail;
3543  }
3544  int64_t Value = MCE->getValue();
3545  // v8.7-A DSB in the nXS variant accepts only the following immediate
3546  // values: 16, 20, 24, 28.
3547  if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3548  Error(ExprLoc, "barrier operand out of range");
3549  return MatchOperand_ParseFail;
3550  }
3551  auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3552  Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3553  ExprLoc, getContext(),
3554  true /*hasnXSModifier*/));
3555  return MatchOperand_Success;
3556  }
3557 
3558  if (Tok.isNot(AsmToken::Identifier)) {
3559  TokError("invalid operand for instruction");
3560  return MatchOperand_ParseFail;
3561  }
3562 
3563  StringRef Operand = Tok.getString();
3564  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3565 
3566  if (!DB) {
3567  TokError("invalid barrier option name");
3568  return MatchOperand_ParseFail;
3569  }
3570 
3571  Operands.push_back(
3572  AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3573  getContext(), true /*hasnXSModifier*/));
3574  Lex(); // Consume the option
3575 
3576  return MatchOperand_Success;
3577 }
3578 
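/// tryParseSysReg - Parse a system register operand for MRS/MSR, accepting
/// both named registers and the generic "S<op0>_<op1>_C<n>_C<m>_<op2>" form.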
3579 OperandMatchResultTy
3580 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3581  const AsmToken &Tok = getTok();
3582 
3583  if (Tok.isNot(AsmToken::Identifier))
3584  return MatchOperand_NoMatch;
3585 
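  // SVCR and friends look like system registers but have their own operand
  // parser, so return no-match here and let that parser claim the token.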
3586  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3587  return MatchOperand_NoMatch;
3588 
3589  int MRSReg, MSRReg;
3590  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3591  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3592  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3593  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3594  } else
3595  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3596 
3597  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3598  unsigned PStateImm = -1;
3599  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3600  PStateImm = PState->Encoding;
3601 
3602  Operands.push_back(
3603  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3604  PStateImm, getContext()));
3605  Lex(); // Eat identifier
3606 
3607  return MatchOperand_Success;
3608 }
3609 
3610 /// tryParseNeonVectorRegister - Parse a vector register operand.
3611 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3612  if (getTok().isNot(AsmToken::Identifier))
3613  return true;
3614 
3615  SMLoc S = getLoc();
3616  // Check for a vector register specifier first.
3617  StringRef Kind;
3618  unsigned Reg;
3619  OperandMatchResultTy Res =
3620  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3621  if (Res != MatchOperand_Success)
3622  return true;
3623 
3624  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3625  if (!KindRes)
3626  return true;
3627 
3628  unsigned ElementWidth = KindRes->second;
3629  Operands.push_back(
3630  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3631  S, getLoc(), getContext()));
3632 
3633  // If there was an explicit qualifier, that goes on as a literal text
3634  // operand.
3635  if (!Kind.empty())
3636  Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
3637 
3638  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3639 }
3640 
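/// tryParseVectorIndex - Parse an optional vector lane specifier of the form
/// "[<imm>]" following a vector register.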
3641 OperandMatchResultTy
3642 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3643  SMLoc SIdx = getLoc();
3644  if (parseOptionalToken(AsmToken::LBrac)) {
3645  const MCExpr *ImmVal;
3646  if (getParser().parseExpression(ImmVal))
3647  return MatchOperand_NoMatch;
3648  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3649  if (!MCE) {
3650  TokError("immediate value expected for vector index");
3651  return MatchOperand_ParseFail;
3652  }
3653 
3654  SMLoc E = getLoc();
3655 
3656  if (parseToken(AsmToken::RBrac, "']' expected"))
3657  return MatchOperand_ParseFail;
3658 
3659  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3660  E, getContext()));
3661  return MatchOperand_Success;
3662  }
3663 
3664  return MatchOperand_NoMatch;
3665 }
3666 
3667 // tryParseVectorRegister - Try to parse a vector register name with
3668 // optional kind specifier. If it is a register specifier, eat the token
3669 // and return it.
3670 OperandMatchResultTy
3671 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3672  RegKind MatchKind) {
3673  const AsmToken &Tok = getTok();
3674 
3675  if (Tok.isNot(AsmToken::Identifier))
3676  return MatchOperand_NoMatch;
3677 
3678  StringRef Name = Tok.getString();
3679  // If there is a kind specifier, it's separated from the register name by
3680  // a '.'.
3681  size_t Start = 0, Next = Name.find('.');
3682  StringRef Head = Name.slice(Start, Next);
3683  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3684 
3685  if (RegNum) {
3686  if (Next != StringRef::npos) {
3687  Kind = Name.slice(Next, StringRef::npos);
3688  if (!isValidVectorKind(Kind, MatchKind)) {
3689  TokError("invalid vector kind qualifier");
3690  return MatchOperand_ParseFail;
3691  }
3692  }
3693  Lex(); // Eat the register token.
3694 
3695  Reg = RegNum;
3696  return MatchOperand_Success;
3697  }
3698 
3699  return MatchOperand_NoMatch;
3700 }
3701 
3702 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3703 OperandMatchResultTy
3704 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3705  // Check for a SVE predicate register specifier first.
3706  const SMLoc S = getLoc();
3707  StringRef Kind;
3708  unsigned RegNum;
3709  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3710  if (Res != MatchOperand_Success)
3711  return Res;
3712 
3713  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3714  if (!KindRes)
3715  return MatchOperand_NoMatch;
3716 
3717  unsigned ElementWidth = KindRes->second;
3718  Operands.push_back(AArch64Operand::CreateVectorReg(
3719  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3720  getLoc(), getContext()));
3721 
3722  if (getLexer().is(AsmToken::LBrac)) {
3723  // Indexed predicate; there's no comma, so try to parse the next operand
3724  // immediately.
3725  if (parseOperand(Operands, false, false))
3726  return MatchOperand_NoMatch;
3727  }
3728 
3729  // Not all predicates are followed by a '/m' or '/z'.
3730  if (getTok().isNot(AsmToken::Slash))
3731  return MatchOperand_Success;
3732 
3733  // But when they do they shouldn't have an element type suffix.
3734  if (!Kind.empty()) {
3735  Error(S, "not expecting size suffix");
3736  return MatchOperand_ParseFail;
3737  }
3738 
3739  // Add a literal slash as an operand.
3740  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
3741 
3742  Lex(); // Eat the slash.
3743 
3744  // Zeroing or merging?
3745  auto Pred = getTok().getString().lower();
3746  if (Pred != "z" && Pred != "m") {
3747  Error(getLoc(), "expecting 'm' or 'z' predication");
3748  return MatchOperand_ParseFail;
3749  }
3750 
3751  // Add zero/merge token.
3752  const char *ZM = Pred == "z" ? "z" : "m";
3753  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
3754 
3755  Lex(); // Eat zero/merge token.
3756  return MatchOperand_Success;
3757 }
3758 
3759 /// parseRegister - Parse a register operand.
3760 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3761  // Try for a Neon vector register.
3762  if (!tryParseNeonVectorRegister(Operands))
3763  return false;
3764 
3765  // Otherwise try for a scalar register.
3766  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3767  return false;
3768 
3769  return true;
3770 }
3771 
3772 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3773  bool HasELFModifier = false;
3774  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
3775 
3776  if (parseOptionalToken(AsmToken::Colon)) {
3777  HasELFModifier = true;
3778 
3779  if (getTok().isNot(AsmToken::Identifier))
3780  return TokError("expect relocation specifier in operand after ':'");
3781 
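  // Map the specifier between the colons (e.g. the "lo12" of ":lo12:sym")
  // onto the corresponding AArch64MCExpr variant kind.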
3782  std::string LowerCase = getTok().getIdentifier().lower();
3783  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3784  .Case("lo12", AArch64MCExpr::VK_LO12)
3785  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3786  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3787  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3788  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3789  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3790  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3791  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3792  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3793  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3794  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3795  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3796  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3797  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3798  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3799  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3800  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3801  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3802  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3803  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3804  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3805  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3806  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3807  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3808  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3809  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3810  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3811  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3812  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3813  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3814  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3815  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3816  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3817  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3818  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3819  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3820  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3821  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3822  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3823  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3824  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3825  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3826  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3827  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3828  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3829  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3830  .Default(AArch64MCExpr::VK_INVALID);
3831 
3832  if (RefKind == AArch64MCExpr::VK_INVALID)
3833  return TokError("expect relocation specifier in operand after ':'");
3834 
3835  Lex(); // Eat identifier
3836 
3837  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3838  return true;
3839  }
3840 
3841  if (getParser().parseExpression(ImmVal))
3842  return true;
3843 
3844  if (HasELFModifier)
3845  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3846 
3847  return false;
3848 }
3849 
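/// tryParseMatrixTileList - Parse an SME matrix tile list such as
/// "{ za0.d, za1.d }", including the empty list "{}" and the "{ za }"
/// shorthand for the whole array.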
3850 OperandMatchResultTy
3851 AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
3852  if (getTok().isNot(AsmToken::LCurly))
3853  return MatchOperand_NoMatch;
3854 
3855  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
3856  StringRef Name = getTok().getString();
3857  size_t DotPosition = Name.find('.');
3858  if (DotPosition == StringRef::npos)
3859  return MatchOperand_NoMatch;
3860 
3861  unsigned RegNum = matchMatrixTileListRegName(Name);
3862  if (!RegNum)
3863  return MatchOperand_NoMatch;
3864 
3865  StringRef Tail = Name.drop_front(DotPosition);
3866  const Optional<std::pair<int, int>> &KindRes =
3867  parseVectorKind(Tail, RegKind::Matrix);
3868  if (!KindRes) {
3869  TokError("Expected the register to be followed by element width suffix");
3870  return MatchOperand_ParseFail;
3871  }
3872  ElementWidth = KindRes->second;
3873  Reg = RegNum;
3874  Lex(); // Eat the register.
3875  return MatchOperand_Success;
3876  };
3877 
3878  SMLoc S = getLoc();
3879  auto LCurly = getTok();
3880  Lex(); // Eat left bracket token.
3881 
3882  // Empty matrix list
3883  if (parseOptionalToken(AsmToken::RCurly)) {
3884  Operands.push_back(AArch64Operand::CreateMatrixTileList(
3885  /*RegMask=*/0, S, getLoc(), getContext()));
3886  return MatchOperand_Success;
3887  }
3888 
3889  // Try parse {za} alias early
3890  if (getTok().getString().equals_insensitive("za")) {
3891  Lex(); // Eat 'za'
3892 
3893  if (parseToken(AsmToken::RCurly, "'}' expected"))
3894  return MatchOperand_ParseFail;
3895 
3896  Operands.push_back(AArch64Operand::CreateMatrixTileList(
3897  /*RegMask=*/0xFF, S, getLoc(), getContext()));
3898  return MatchOperand_Success;
3899  }
3900 
3901  SMLoc TileLoc = getLoc();
3902 
3903  unsigned FirstReg, ElementWidth;
3904  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
3905  if (ParseRes != MatchOperand_Success) {
3906  getLexer().UnLex(LCurly);
3907  return ParseRes;
3908  }
3909 
3910  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3911 
3912  unsigned PrevReg = FirstReg;
3913  unsigned Count = 1;
3914 
3915  SmallSet<unsigned, 8> DRegs;
3916  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
3917 
3918  SmallSet<unsigned, 8> SeenRegs;
3919  SeenRegs.insert(FirstReg);
3920 
3921  while (parseOptionalToken(AsmToken::Comma)) {
3922  TileLoc = getLoc();
3923  unsigned Reg, NextElementWidth;
3924  ParseRes = ParseMatrixTile(Reg, NextElementWidth);
3925  if (ParseRes != MatchOperand_Success)
3926  return ParseRes;
3927 
3928  // Element size must match on all regs in the list.
3929  if (ElementWidth != NextElementWidth) {
3930  Error(TileLoc, "mismatched register size suffix");
3931  return MatchOperand_ParseFail;
3932  }
3933 
3934  if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
3935  Warning(TileLoc, "tile list not in ascending order");
3936 
3937  if (SeenRegs.contains(Reg))
3938  Warning(TileLoc, "duplicate tile in list");
3939  else {
3940  SeenRegs.insert(Reg);
3941  AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
3942  }
3943 
3944  PrevReg = Reg;
3945  ++Count;
3946  }
3947 
3948  if (parseToken(AsmToken::RCurly, "'}' expected"))
3949  return MatchOperand_ParseFail;
3950 
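  // Collapse the double-word tile aliases collected above into the 8-bit
  // mask expected by the operand, one bit per ZAD0..ZAD7.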
3951  unsigned RegMask = 0;
3952  for (auto Reg : DRegs)
3953  RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
3954  RI->getEncodingValue(AArch64::ZAD0));
3955  Operands.push_back(
3956  AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
3957 
3958  return MatchOperand_Success;
3959 }
3960 
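/// tryParseVectorList - Parse a vector register list, written either as a
/// range ("{ v0.4s - v3.4s }") or as up to four comma-separated sequential
/// registers ("{ v0.4s, v1.4s }").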
3961 template <RegKind VectorKind>
3962 OperandMatchResultTy
3963 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3964  bool ExpectMatch) {
3965  MCAsmParser &Parser = getParser();
3966  if (!getTok().is(AsmToken::LCurly))
3967  return MatchOperand_NoMatch;
3968 
3969  // Wrapper around parse function
3970  auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3971  bool NoMatchIsError) {
3972  auto RegTok = getTok();
3973  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3974  if (ParseRes == MatchOperand_Success) {
3975  if (parseVectorKind(Kind, VectorKind))
3976  return ParseRes;
3977  llvm_unreachable("Expected a valid vector kind");
3978  }
3979 
3980  if (RegTok.isNot(AsmToken::Identifier) ||
3981  ParseRes == MatchOperand_ParseFail ||
3982  (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
3983  !RegTok.getString().startswith_insensitive("za"))) {
3984  Error(Loc, "vector register expected");
3985  return MatchOperand_ParseFail;
3986  }
3987 
3988  return MatchOperand_NoMatch;
3989  };
3990 
3991  SMLoc S = getLoc();
3992  auto LCurly = getTok();
3993  Lex(); // Eat left bracket token.
3994 
3995  StringRef Kind;
3996  unsigned FirstReg;
3997  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3998 
3999  // Put back the original left bracket if there was no match, so that
4000  // different types of list-operands can be matched (e.g. SVE, Neon).
4001  if (ParseRes == MatchOperand_NoMatch)
4002  Parser.getLexer().UnLex(LCurly);
4003 
4004  if (ParseRes != MatchOperand_Success)
4005  return ParseRes;
4006 
4007  int64_t PrevReg = FirstReg;
4008  unsigned Count = 1;
4009 
4010  if (parseOptionalToken(AsmToken::Minus)) {
4011  SMLoc Loc = getLoc();
4012  StringRef NextKind;
4013 
4014  unsigned Reg;
4015  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4016  if (ParseRes != MatchOperand_Success)
4017  return ParseRes;
4018 
4019  // Any kind suffixes must match on all regs in the list.
4020  if (Kind != NextKind) {
4021  Error(Loc, "mismatched register size suffix");
4022  return MatchOperand_ParseFail;
4023  }
4024 
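  // A range may wrap past the last register (e.g. "{ v31.4s - v2.4s }"),
  // so measure its length modulo 32.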
4025  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
4026 
4027  if (Space == 0 || Space > 3) {
4028  Error(Loc, "invalid number of vectors");
4029  return MatchOperand_ParseFail;
4030  }
4031 
4032  Count += Space;
4033  }
4034  else {
4035  while (parseOptionalToken(AsmToken::Comma)) {
4036  SMLoc Loc = getLoc();
4037  StringRef NextKind;
4038  unsigned Reg;
4039  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4040  if (ParseRes != MatchOperand_Success)
4041  return ParseRes;
4042 
4043  // Any kind suffixes must match on all regs in the list.
4044  if (Kind != NextKind) {
4045  Error(Loc, "mismatched register size suffix");
4046  return MatchOperand_ParseFail;
4047  }
4048 
4049  // Registers must be incremental (with wraparound at 31)
4050  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
4051  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
4052  Error(Loc, "registers must be sequential");
4053  return MatchOperand_ParseFail;
4054  }
4055 
4056  PrevReg = Reg;
4057  ++Count;
4058  }
4059  }
4060 
4061  if (parseToken(AsmToken::RCurly, "'}' expected"))
4062  return MatchOperand_ParseFail;
4063 
4064  if (Count > 4) {
4065  Error(S, "invalid number of vectors");
4066  return MatchOperand_ParseFail;
4067  }
4068 
4069  unsigned NumElements = 0;
4070  unsigned ElementWidth = 0;
4071  if (!Kind.empty()) {
4072  if (const auto &VK = parseVectorKind(Kind, VectorKind))
4073  std::tie(NumElements, ElementWidth) = *VK;
4074  }
4075 
4076  Operands.push_back(AArch64Operand::CreateVectorList(
4077  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
4078  getContext()));
4079 
4080  return MatchOperand_Success;
4081 }
4082 
4083 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4084 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4085  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4086  if (ParseRes != MatchOperand_Success)
4087  return true;
4088 
4089  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4090 }
4091 
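/// tryParseGPR64sp0Operand - Parse a 64-bit scalar register optionally
/// followed by ", #0"; any other index value is rejected.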
4092 OperandMatchResultTy
4093 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4094  SMLoc StartLoc = getLoc();
4095 
4096  unsigned RegNum;
4097  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4098  if (Res != MatchOperand_Success)
4099  return Res;
4100 
4101  if (!parseOptionalToken(AsmToken::Comma)) {
4102  Operands.push_back(AArch64Operand::CreateReg(
4103  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4104  return MatchOperand_Success;
4105  }
4106 
4107  parseOptionalToken(AsmToken::Hash);
4108 
4109  if (getTok().isNot(AsmToken::Integer)) {
4110  Error(getLoc(), "index must be absent or #0");
4111  return MatchOperand_ParseFail;
4112  }
4113 
4114  const MCExpr *ImmVal;
4115  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4116  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4117  Error(getLoc(), "index must be absent or #0");
4118  return MatchOperand_ParseFail;
4119  }
4120 
4121  Operands.push_back(AArch64Operand::CreateReg(
4122  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4123  return MatchOperand_Success;
4124 }
4125 
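/// tryParseGPROperand - Parse a scalar GPR operand; when ParseShiftExtend is
/// set, also accept a trailing shift/extend modifier (e.g. "x1, lsl #3").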
4126 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4127 OperandMatchResultTy
4128 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4129  SMLoc StartLoc = getLoc();
4130 
4131  unsigned RegNum;
4132  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4133  if (Res != MatchOperand_Success)
4134  return Res;
4135 
4136  // No shift/extend is the default.
4137  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4138  Operands.push_back(AArch64Operand::CreateReg(
4139  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4140  return MatchOperand_Success;
4141  }
4142 
4143  // Eat the comma
4144  Lex();
4145 
4146  // Match the shift
4147  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4148  Res = tryParseOptionalShiftExtend(ExtOpnd);
4149  if (Res != MatchOperand_Success)
4150  return Res;
4151 
4152  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4153  Operands.push_back(AArch64Operand::CreateReg(
4154  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4155  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4156  Ext->hasShiftExtendAmount()));
4157 
4158  return MatchOperand_Success;
4159 }
4160 
4161 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4162  MCAsmParser &Parser = getParser();
4163 
4164  // Some SVE instructions have a decoration after the immediate, e.g.
4165  // "mul vl". We parse them here and add tokens, which must be present in the
4166  // asm string in the tablegen instruction.
4167  bool NextIsVL =
4168  Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4169  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4170  if (!getTok().getString().equals_insensitive("mul") ||
4171  !(NextIsVL || NextIsHash))
4172  return true;
4173 
4174  Operands.push_back(
4175  AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4176  Lex(); // Eat the "mul"
4177 
4178  if (NextIsVL) {
4179  Operands.push_back(
4180  AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4181  Lex(); // Eat the "vl"
4182  return false;
4183  }
4184 
4185  if (NextIsHash) {
4186  Lex(); // Eat the #
4187  SMLoc S = getLoc();
4188 
4189  // Parse immediate operand.
4190  const MCExpr *ImmVal;
4191  if (!Parser.parseExpression(ImmVal))
4192  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4193  Operands.push_back(AArch64Operand::CreateImm(
4194  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4195  getContext()));
4196  return false;
4197  }
4198  }
4199 
4200  return Error(getLoc(), "expected 'vl' or '#<imm>'");
4201 }
4202 
4203 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4204  auto Tok = getTok();
4205  if (Tok.isNot(AsmToken::Identifier))
4206  return true;
4207 
4208  auto Keyword = Tok.getString();
4209  Keyword = StringSwitch<StringRef>(Keyword.lower())
4210  .Case("sm", "sm")
4211  .Case("za", "za")
4212  .Default(Keyword);
4213  Operands.push_back(
4214  AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4215 
4216  Lex();
4217  return false;
4218 }
4219 
4220 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4221 /// operand regardless of the mnemonic.
4222 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4223  bool invertCondCode) {
4224  MCAsmParser &Parser = getParser();
4225 
4226  OperandMatchResultTy ResTy =
4227  MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
4228 
4229  // Check if the current operand has a custom associated parser, if so, try to
4230  // custom parse the operand, or fallback to the general approach.
4231  if (ResTy == MatchOperand_Success)
4232  return false;
4233  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4234  // there was a match, but an error occurred, in which case, just return that
4235  // the operand parsing failed.
4236  if (ResTy == MatchOperand_ParseFail)
4237  return true;
4238 
4239  // Nothing custom, so do general case parsing.
4240  SMLoc S, E;
4241  switch (getLexer().getKind()) {
4242  default: {
4243  SMLoc S = getLoc();
4244  const MCExpr *Expr;
4245  if (parseSymbolicImmVal(Expr))
4246  return Error(S, "invalid operand");
4247 
4248  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4249  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4250  return false;
4251  }
4252  case AsmToken::LBrac: {
4253  Operands.push_back(
4254  AArch64Operand::CreateToken("[", getLoc(), getContext()));
4255  Lex(); // Eat '['
4256 
4257  // There's no comma after a '[', so we can parse the next operand
4258  // immediately.
4259  return parseOperand(Operands, false, false);
4260  }
4261  case AsmToken::LCurly: {
4262  if (!parseNeonVectorList(Operands))
4263  return false;
4264 
4265  Operands.push_back(
4266  AArch64Operand::CreateToken("{", getLoc(), getContext()));
4267  Lex(); // Eat '{'
4268 
4269  // There's no comma after a '{', so we can parse the next operand
4270  // immediately.
4271  return parseOperand(Operands, false, false);
4272  }
4273  case AsmToken::Identifier: {
4274  // If we're expecting a Condition Code operand, then just parse that.
4275  if (isCondCode)
4276  return parseCondCode(Operands, invertCondCode);
4277 
4278  // If it's a register name, parse it.
4279  if (!parseRegister(Operands))
4280  return false;
4281 
4282  // See if this is a "mul vl" decoration or "mul #<int>" operand used
4283  // by SVE instructions.
4284  if (!parseOptionalMulOperand(Operands))
4285  return false;
4286 
4287  // If this is an "smstart" or "smstop" instruction, parse its special
4288  // keyword operand as an identifier.
4289  if (Mnemonic == "smstart" || Mnemonic == "smstop")
4290  return parseKeywordOperand(Operands);
4291 
4292  // This could be an optional "shift" or "extend" operand.
4293  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
4294  // We can only continue if no tokens were eaten.
4295  if (GotShift != MatchOperand_NoMatch)
4296  return GotShift;
4297 
4298  // If this is a two-word mnemonic, parse its special keyword
4299  // operand as an identifier.
4300  if (Mnemonic == "brb")
4301  return parseKeywordOperand(Operands);
4302 
4303  // This was not a register so parse other operands that start with an
4304  // identifier (like labels) as expressions and create them as immediates.
4305  const MCExpr *IdVal;
4306  S = getLoc();
4307  if (getParser().parseExpression(IdVal))
4308  return true;
4309  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4310  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4311  return false;
4312  }
4313  case AsmToken::Integer:
4314  case AsmToken::Real:
4315  case AsmToken::Hash: {
4316  // #42 -> immediate.
4317  S = getLoc();
4318 
4319  parseOptionalToken(AsmToken::Hash);
4320 
4321  // Parse a negative sign
4322  bool isNegative = false;
4323  if (getTok().is(AsmToken::Minus)) {
4324  isNegative = true;
4325  // We need to consume this token only when we have a Real, otherwise
4326  // we let parseSymbolicImmVal take care of it
4327  if (Parser.getLexer().peekTok().is(AsmToken::Real))
4328  Lex();
4329  }
4330 
4331  // The only Real that should come through here is a literal #0.0 for
4332  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4333  // so convert the value.
4334  const AsmToken &Tok = getTok();
4335  if (Tok.is(AsmToken::Real)) {
4336  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4337  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4338  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4339  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4340  Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4341  return TokError("unexpected floating point literal");
4342  else if (IntVal != 0 || isNegative)
4343  return TokError("expected floating-point constant #0.0");
4344  Lex(); // Eat the token.
4345 
4346  Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4347  Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4348  return false;
4349  }
4350 
4351  const MCExpr *ImmVal;
4352  if (parseSymbolicImmVal(ImmVal))
4353  return true;
4354 
4355  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4356  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4357  return false;
4358  }
4359  case AsmToken::Equal: {
4360  SMLoc Loc = getLoc();
4361  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4362  return TokError("unexpected token in operand");
4363  Lex(); // Eat '='
4364  const MCExpr *SubExprVal;
4365  if (getParser().parseExpression(SubExprVal))
4366  return true;
4367 
4368  if (Operands.size() < 2 ||
4369  !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4370  return Error(Loc, "Only valid when first operand is register");
4371 
4372  bool IsXReg =
4373  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4374  Operands[1]->getReg());
4375 
4376  MCContext& Ctx = getContext();
4377  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4378  // If the op is an imm and can be fit into a mov, then replace ldr with mov.
4379  if (isa<MCConstantExpr>(SubExprVal)) {
4380  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4381  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
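  // Strip trailing all-zero 16-bit chunks so that, when possible, the
  // remainder fits a single MOVZ with an "lsl #16*n" shift.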
4382  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
4383  ShiftAmt += 16;
4384  Imm >>= 16;
4385  }
4386  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4387  Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4388  Operands.push_back(AArch64Operand::CreateImm(
4389  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4390  if (ShiftAmt)
4391  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4392  ShiftAmt, true, S, E, Ctx));
4393  return false;
4394  }
4395  APInt Simm = APInt(64, Imm << ShiftAmt);
4396  // check if the immediate is an unsigned or signed 32-bit int for W regs
4397  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4398  return Error(Loc, "Immediate too large for register");
4399  }
4400  // If it is a label or an imm that cannot fit in a movz, put it into CP.
4401  const MCExpr *CPLoc =
4402  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4403  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4404  return false;
4405  }
4406  }
4407 }
4408 
4409 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4410  const MCExpr *Expr = nullptr;
4411  SMLoc L = getLoc();
4412  if (check(getParser().parseExpression(Expr), L, "expected expression"))
4413  return true;
4414  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4415  if (check(!Value, L, "expected constant expression"))
4416  return true;
4417  Out = Value->getValue();
4418  return false;
4419 }
4420 
4421 bool AArch64AsmParser::parseComma() {
4422  if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4423  return true;
4424  // Eat the comma
4425  Lex();
4426  return false;
4427 }
4428 
4429 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4430  unsigned First, unsigned Last) {
4431  unsigned Reg;
4432  SMLoc Start, End;
4433  if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
4434  return true;
4435 
4436  // Special handling for FP and LR; they aren't linearly after x28 in
4437  // the registers enum.
4438  unsigned RangeEnd = Last;
4439  if (Base == AArch64::X0) {
4440  if (Last == AArch64::FP) {
4441  RangeEnd = AArch64::X28;
4442  if (Reg == AArch64::FP) {
4443  Out = 29;
4444  return false;
4445  }
4446  }
4447  if (Last == AArch64::LR) {
4448  RangeEnd = AArch64::X28;
4449  if (Reg == AArch64::FP) {
4450  Out = 29;
4451  return false;
4452  } else if (Reg == AArch64::LR) {
4453  Out = 30;
4454  return false;
4455  }
4456  }
4457  }
4458 
4459  if (check(Reg < First || Reg > RangeEnd, Start,
4460  Twine("expected register in range ") +
4461  AArch64InstPrinter::getRegisterName(First) + " to " +
4462  AArch64InstPrinter::getRegisterName(Last)))
4463  return true;
4464  Out = Reg - Base;
4465  return false;
4466 }
4467 
4468 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4469  const MCParsedAsmOperand &Op2) const {
4470  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4471  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4472  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4473  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4474  return MCTargetAsmParser::regsEqual(Op1, Op2);
4475 
4476  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4477  "Testing equality of non-scalar registers not supported");
4478 
4479  // Check if the registers match via their sub/super register classes.
4480  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4481  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4482  if (AOp1.getRegEqualityTy() == EqualsSubReg)
4483  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4484  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4485  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4486  if (AOp2.getRegEqualityTy() == EqualsSubReg)
4487  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4488 
4489  return false;
4490 }
4491 
4492 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
4493 /// operands.
4494 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
4495  StringRef Name, SMLoc NameLoc,
4496  OperandVector &Operands) {
4497  Name = StringSwitch<StringRef>(Name.lower())
4498  .Case("beq", "b.eq")
4499  .Case("bne", "b.ne")
4500  .Case("bhs", "b.hs")
4501  .Case("bcs", "b.cs")
4502  .Case("blo", "b.lo")
4503  .Case("bcc", "b.cc")
4504  .Case("bmi", "b.mi")
4505  .Case("bpl", "b.pl")
4506  .Case("bvs", "b.vs")
4507  .Case("bvc", "b.vc")
4508  .Case("bhi", "b.hi")
4509  .Case("bls", "b.ls")
4510  .Case("bge", "b.ge")
4511  .Case("blt", "b.lt")
4512  .Case("bgt", "b.gt")
4513  .Case("ble", "b.le")
4514  .Case("bal", "b.al")
4515  .Case("bnv", "b.nv")
4516  .Default(Name);
4517 
4518  // First check for the AArch64-specific .req directive.
4519  if (getTok().is(AsmToken::Identifier) &&
4520  getTok().getIdentifier().lower() == ".req") {
4521  parseDirectiveReq(Name, NameLoc);
4522  // We always return 'error' for this, as we're done with this
4523  // statement and don't need to match the instruction.
4524  return true;
4525  }
4526 
4527  // Create the leading tokens for the mnemonic, split by '.' characters.
4528  size_t Start = 0, Next = Name.find('.');
4529  StringRef Head = Name.slice(Start, Next);
4530 
4531  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4532  // the SYS instruction.
4533  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4534  Head == "cfp" || Head == "dvp" || Head == "cpp")
4535  return parseSysAlias(Head, NameLoc, Operands);
4536 
4537  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
4538  Mnemonic = Head;
4539 
4540  // Handle condition codes for a branch mnemonic
4541  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
4542  Start = Next;
4543  Next = Name.find('.', Start + 1);
4544  Head = Name.slice(Start + 1, Next);
4545 
4546  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4547  (Head.data() - Name.data()));
4548  AArch64CC::CondCode CC = parseCondCodeString(Head);
4549  if (CC == AArch64CC::Invalid)
4550  return Error(SuffixLoc, "invalid condition code");
4551  Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
4552  /*IsSuffix=*/true));
4553  Operands.push_back(
4554  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4555  }
4556 
4557  // Add the remaining tokens in the mnemonic.
4558  while (Next != StringRef::npos) {
4559  Start = Next;
4560  Next = Name.find('.', Start + 1);
4561  Head = Name.slice(Start, Next);
4562  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4563  (Head.data() - Name.data()) + 1);
4564  Operands.push_back(AArch64Operand::CreateToken(
4565  Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
4566  }
4567 
4568  // Conditional compare instructions have a Condition Code operand, which needs
4569  // to be parsed and an immediate operand created.
4570  bool condCodeFourthOperand =
4571  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4572  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4573  Head == "csinc" || Head == "csinv" || Head == "csneg");
4574 
4575  // These instructions are aliases to some of the conditional select
4576  // instructions. However, the condition code is inverted in the aliased
4577  // instruction.
4578  //
4579  // FIXME: Is this the correct way to handle these? Or should the parser
4580  // generate the aliased instructions directly?
4581  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4582  bool condCodeThirdOperand =
4583  (Head == "cinc" || Head == "cinv" || Head == "cneg");
4584 
4585  // Read the remaining operands.
4586  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4587 
4588  unsigned N = 1;
4589  do {
4590  // Parse and remember the operand.
4591  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4592  (N == 3 && condCodeThirdOperand) ||
4593  (N == 2 && condCodeSecondOperand),
4594  condCodeSecondOperand || condCodeThirdOperand)) {
4595  return true;
4596  }
4597 
4598  // After successfully parsing some operands there are three special cases
4599  // to consider (i.e. notional operands not separated by commas). Two are
4600  // due to memory specifiers:
4601  // + An RBrac will end an address for load/store/prefetch
4602  // + An '!' will indicate a pre-indexed operation.
4603  //
4604  // And a further case is '}', which ends a group of tokens specifying the
4605  // SME accumulator array 'ZA' or tile vector, i.e.
4606  //
4607  // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
4608  //
4609  // It's someone else's responsibility to make sure these tokens are sane
4610  // in the given context!
4611 
4612  if (parseOptionalToken(AsmToken::RBrac))
4613  Operands.push_back(
4614  AArch64Operand::CreateToken("]", getLoc(), getContext()));
4615  if (parseOptionalToken(AsmToken::Exclaim))
4616  Operands.push_back(
4617  AArch64Operand::CreateToken("!", getLoc(), getContext()));
4618  if (parseOptionalToken(AsmToken::RCurly))
4619  Operands.push_back(
4620  AArch64Operand::CreateToken("}", getLoc(), getContext()));
4621 
4622  ++N;
4623  } while (parseOptionalToken(AsmToken::Comma));
4624  }
4625 
4626  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4627  return true;
4628 
4629  return false;
4630 }
4631 
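// An SVE Z register overlays the Neon B/H/S/D/Q registers with the same
// index, so any of those spellings refers to the same architectural register.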
4632 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4633  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4634  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4635  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4636  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4637  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4638  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4639  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4640 }
4641 
4642 // FIXME: This entire function is a giant hack to provide us with decent
4643 // operand range validation/diagnostics until TableGen/MC can be extended
4644 // to support autogeneration of this kind of validation.
4645 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4646  SmallVectorImpl<SMLoc> &Loc) {
4647  const MCRegisterInfo *RI = getContext().getRegisterInfo();
4648  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4649 
4650  // A prefix only applies to the instruction following it. Here we extract
4651  // prefix information for the next instruction before validating the current
4652  // one so that in the case of failure we don't erroneously continue using the
4653  // current prefix.
4654  PrefixInfo Prefix = NextPrefix;
4655  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4656 
4657  // Before validating the instruction in isolation we run through the rules
4658  // applicable when it follows a prefix instruction.
4659  // NOTE: brk & hlt can be prefixed but require no additional validation.
4660  if (Prefix.isActive() &&
4661  (Inst.getOpcode() != AArch64::BRK) &&
4662  (Inst.getOpcode() != AArch64::HLT)) {
4663 
4664  // Prefixed instructions must have a destructive operand.
4665  if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4666  AArch64::NotDestructive)
4667  return Error(IDLoc, "instruction is unpredictable when following a"
4668  " movprfx, suggest replacing movprfx with mov");
4669 
4670  // Destination operands must match.
4671  if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4672  return Error(Loc[0], "instruction is unpredictable when following a"
4673  " movprfx writing to a different destination");
4674 
4675  // Destination operand must not be used in any other location.
4676  for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4677  if (Inst.getOperand(i).isReg() &&
4678  (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4679  isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4680  return Error(Loc[0], "instruction is unpredictable when following a"
4681  " movprfx and destination also used as non-destructive"
4682  " source");
4683  }
4684 
4685  auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4686  if (Prefix.isPredicated()) {
4687  int PgIdx = -1;
4688 
4689  // Find the instruction's general predicate.
4690  for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4691  if (Inst.getOperand(i).isReg() &&
4692  PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4693  PgIdx = i;
4694  break;
4695  }
4696 
4697  // Instruction must be predicated if the movprfx is predicated.
4698  if (PgIdx == -1 ||
4699  (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4700  return Error(IDLoc, "instruction is unpredictable when following a"
4701  " predicated movprfx, suggest using unpredicated movprfx");
4702 
4703  // Instruction must use same general predicate as the movprfx.
4704  if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4705  return Error(IDLoc, "instruction is unpredictable when following a"
4706  " predicated movprfx using a different general predicate");
4707 
4708  // Instruction element type must match the movprfx.
4709  if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4710  return Error(IDLoc, "instruction is unpredictable when following a"
4711  " predicated movprfx with a different element size");
4712  }
4713  }
4714 
4715  // Check for indexed addressing modes w/ the base register being the
4716  // same as a destination/source register or pair load where
4717  // the Rt == Rt2. All of those are undefined behaviour.
4718  switch (Inst.getOpcode()) {
4719  case AArch64::LDPSWpre:
4720  case AArch64::LDPWpost:
4721  case AArch64::LDPWpre:
4722  case AArch64::LDPXpost:
4723  case AArch64::LDPXpre: {
4724  unsigned Rt = Inst.getOperand(1).getReg();
4725  unsigned Rt2 = Inst.getOperand(2).getReg();
4726  unsigned Rn = Inst.getOperand(3).getReg();
4727  if (RI->isSubRegisterEq(Rn, Rt))
4728  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4729  "is also a destination");
4730  if (RI->isSubRegisterEq(Rn, Rt2))
4731  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4732  "is also a destination");
4733  LLVM_FALLTHROUGH;
4734  }
4735  case AArch64::LDPDi:
4736  case AArch64::LDPQi:
4737  case AArch64::LDPSi:
4738  case AArch64::LDPSWi:
4739  case AArch64::LDPWi:
4740  case AArch64::LDPXi: {
4741  unsigned Rt = Inst.getOperand(0).getReg();
4742  unsigned Rt2 = Inst.getOperand(1).getReg();
4743  if (Rt == Rt2)
4744  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4745  break;
4746  }
4747  case AArch64::LDPDpost:
4748  case AArch64::LDPDpre:
4749  case AArch64::LDPQpost:
4750  case AArch64::LDPQpre:
4751  case AArch64::LDPSpost:
4752  case AArch64::LDPSpre:
4753  case AArch64::LDPSWpost: {
4754  unsigned Rt = Inst.getOperand(1).getReg();
4755  unsigned Rt2 = Inst.getOperand(2).getReg();
4756  if (Rt == Rt2)
4757  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4758  break;
4759  }
4760  case AArch64::STPDpost:
4761  case AArch64::STPDpre:
4762  case AArch64::STPQpost:
4763  case AArch64::STPQpre:
4764  case AArch64::STPSpost:
4765  case AArch64::STPSpre:
4766  case AArch64::STPWpost:
4767  case AArch64::STPWpre:
4768  case AArch64::STPXpost:
4769  case AArch64::STPXpre: {
4770  unsigned Rt = Inst.getOperand(1).getReg();
4771  unsigned Rt2 = Inst.getOperand(2).getReg();
4772  unsigned Rn = Inst.getOperand(3).getReg();
4773  if (RI->isSubRegisterEq(Rn, Rt))
4774  return Error(Loc[0], "unpredictable STP instruction, writeback base "
4775  "is also a source");
4776  if (RI->isSubRegisterEq(Rn, Rt2))
4777  return Error(Loc[1], "unpredictable STP instruction, writeback base "
4778  "is also a source");
4779  break;
4780  }
4781  case AArch64::LDRBBpre:
4782  case AArch64::LDRBpre:
4783  case AArch64::LDRHHpre:
4784  case AArch64::LDRHpre:
4785  case AArch64::LDRSBWpre:
4786  case AArch64::LDRSBXpre:
4787  case AArch64::LDRSHWpre:
4788  case AArch64::LDRSHXpre:
4789  case AArch64::LDRSWpre:
4790  case AArch64::LDRWpre:
4791  case AArch64::LDRXpre:
4792  case AArch64::LDRBBpost:
4793  case AArch64::LDRBpost:
4794  case AArch64::LDRHHpost:
4795  case AArch64::LDRHpost:
4796  case AArch64::LDRSBWpost:
4797  case AArch64::LDRSBXpost:
4798  case AArch64::LDRSHWpost:
4799  case AArch64::LDRSHXpost:
4800  case AArch64::LDRSWpost:
4801  case AArch64::LDRWpost:
4802  case AArch64::LDRXpost: {
4803  unsigned Rt = Inst.getOperand(1).getReg();
4804  unsigned Rn = Inst.getOperand(2).getReg();
4805  if (RI->isSubRegisterEq(Rn, Rt))
4806  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4807  "is also a source");
4808  break;
4809  }
4810  case AArch64::STRBBpost:
4811  case AArch64::STRBpost:
4812  case AArch64::STRHHpost:
4813  case AArch64::STRHpost:
4814  case AArch64::STRWpost:
4815  case AArch64::STRXpost:
4816  case AArch64::STRBBpre:
4817  case AArch64::STRBpre:
4818  case AArch64::STRHHpre:
4819  case AArch64::STRHpre:
4820  case AArch64::STRWpre:
4821  case AArch64::STRXpre: {
4822  unsigned Rt = Inst.getOperand(1).getReg();
4823  unsigned Rn = Inst.getOperand(2).getReg();
4824  if (RI->isSubRegisterEq(Rn, Rt))
4825  return Error(Loc[0], "unpredictable STR instruction, writeback base "
4826  "is also a source");
4827  break;
4828  }
4829  case AArch64::STXRB:
4830  case AArch64::STXRH:
4831  case AArch64::STXRW:
4832  case AArch64::STXRX:
4833  case AArch64::STLXRB:
4834  case AArch64::STLXRH:
4835  case AArch64::STLXRW:
4836  case AArch64::STLXRX: {
4837  unsigned Rs = Inst.getOperand(0).getReg();
4838  unsigned Rt = Inst.getOperand(1).getReg();
4839  unsigned Rn = Inst.getOperand(2).getReg();
4840  if (RI->isSubRegisterEq(Rt, Rs) ||
4841  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4842  return Error(Loc[0],
4843  "unpredictable STXR instruction, status is also a source");
4844  break;
4845  }
4846  case AArch64::STXPW:
4847  case AArch64::STXPX:
4848  case AArch64::STLXPW:
4849  case AArch64::STLXPX: {
4850  unsigned Rs = Inst.getOperand(0).getReg();
4851  unsigned Rt1 = Inst.getOperand(1).getReg();
4852  unsigned Rt2 = Inst.getOperand(2).getReg();
4853  unsigned Rn = Inst.getOperand(3).getReg();
4854  if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4855  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4856  return Error(Loc[0],
4857  "unpredictable STXP instruction, status is also a source");
4858  break;
4859  }
4860  case AArch64::LDRABwriteback:
4861  case AArch64::LDRAAwriteback: {
4862  unsigned Xt = Inst.getOperand(0).getReg();
4863  unsigned Xn = Inst.getOperand(1).getReg();
4864  if (Xt == Xn)
4865  return Error(Loc[0],
4866  "unpredictable LDRA instruction, writeback base"
4867  " is also a destination");
4868  break;
4869  }
4870  }
4871 
4872  // Check v8.8-A memops instructions.
4873  switch (Inst.getOpcode()) {
4874  case AArch64::CPYFP:
4875  case AArch64::CPYFPWN:
4876  case AArch64::CPYFPRN:
4877  case AArch64::CPYFPN:
4878  case AArch64::CPYFPWT:
4879  case AArch64::CPYFPWTWN:
4880  case AArch64::CPYFPWTRN:
4881  case AArch64::CPYFPWTN:
4882  case AArch64::CPYFPRT:
4883  case AArch64::CPYFPRTWN:
4884  case AArch64::CPYFPRTRN:
4885  case AArch64::CPYFPRTN:
4886  case AArch64::CPYFPT:
4887  case AArch64::CPYFPTWN:
4888  case AArch64::CPYFPTRN:
4889  case AArch64::CPYFPTN:
4890  case AArch64::CPYFM:
4891  case AArch64::CPYFMWN:
4892  case AArch64::CPYFMRN:
4893  case AArch64::CPYFMN:
4894  case AArch64::CPYFMWT:
4895  case AArch64::CPYFMWTWN:
4896  case AArch64::CPYFMWTRN:
4897  case AArch64::CPYFMWTN:
4898  case AArch64::CPYFMRT:
4899  case AArch64::CPYFMRTWN:
4900  case AArch64::CPYFMRTRN:
4901  case AArch64::CPYFMRTN:
4902  case AArch64::CPYFMT:
4903  case AArch64::CPYFMTWN:
4904  case AArch64::CPYFMTRN:
4905  case AArch64::CPYFMTN:
4906  case AArch64::CPYFE:
4907  case AArch64::CPYFEWN:
4908  case AArch64::CPYFERN:
4909  case AArch64::CPYFEN:
4910  case AArch64::CPYFEWT:
4911  case AArch64::CPYFEWTWN:
4912  case AArch64::CPYFEWTRN:
4913  case AArch64::CPYFEWTN:
4914  case AArch64::CPYFERT:
4915  case AArch64::CPYFERTWN:
4916  case AArch64::CPYFERTRN:
4917  case AArch64::CPYFERTN:
4918  case AArch64::CPYFET:
4919  case AArch64::CPYFETWN:
4920  case AArch64::CPYFETRN:
4921  case AArch64::CPYFETN:
4922  case AArch64::CPYP:
4923  case AArch64::CPYPWN:
4924  case AArch64::CPYPRN:
4925  case AArch64::CPYPN:
4926  case AArch64::CPYPWT:
4927  case AArch64::CPYPWTWN:
4928  case AArch64::CPYPWTRN:
4929  case AArch64::CPYPWTN:
4930  case AArch64::CPYPRT:
4931  case AArch64::CPYPRTWN:
4932  case AArch64::CPYPRTRN:
4933  case AArch64::CPYPRTN:
4934  case AArch64::CPYPT:
4935  case AArch64::CPYPTWN:
4936  case AArch64::CPYPTRN:
4937  case AArch64::CPYPTN:
4938  case AArch64::CPYM:
4939  case AArch64::CPYMWN:
4940  case AArch64::CPYMRN:
4941  case AArch64::CPYMN:
4942  case AArch64::CPYMWT:
4943  case AArch64::CPYMWTWN:
4944  case AArch64::CPYMWTRN:
4945  case AArch64::CPYMWTN:
4946  case AArch64::CPYMRT:
4947  case AArch64::CPYMRTWN:
4948  case AArch64::CPYMRTRN:
4949  case AArch64::CPYMRTN:
4950  case AArch64::CPYMT:
4951  case AArch64::CPYMTWN:
4952  case AArch64::CPYMTRN:
4953  case AArch64::CPYMTN:
4954  case AArch64::CPYE:
4955  case AArch64::CPYEWN:
4956  case AArch64::CPYERN:
4957  case AArch64::CPYEN:
4958  case AArch64::CPYEWT:
4959  case AArch64::CPYEWTWN:
4960  case AArch64::CPYEWTRN:
4961  case AArch64::CPYEWTN:
4962  case AArch64::CPYERT:
4963  case AArch64::CPYERTWN:
4964  case AArch64::CPYERTRN:
4965  case AArch64::CPYERTN:
4966  case AArch64::CPYET:
4967  case AArch64::CPYETWN:
4968  case AArch64::CPYETRN:
4969  case AArch64::CPYETN: {
4970  unsigned Xd_wb = Inst.getOperand(0).getReg();
4971  unsigned Xs_wb = Inst.getOperand(1).getReg();
4972  unsigned Xn_wb = Inst.getOperand(2).getReg();
4973  unsigned Xd = Inst.getOperand(3).getReg();
4974  unsigned Xs = Inst.getOperand(4).getReg();
4975  unsigned Xn = Inst.getOperand(5).getReg();
4976  if (Xd_wb != Xd)
4977  return Error(Loc[0],
4978  "invalid CPY instruction, Xd_wb and Xd do not match");
4979  if (Xs_wb != Xs)
4980  return Error(Loc[0],
4981  "invalid CPY instruction, Xs_wb and Xs do not match");
4982  if (Xn_wb != Xn)
4983  return Error(Loc[0],
4984  "invalid CPY instruction, Xn_wb and Xn do not match");
4985  if (Xd == Xs)
4986  return Error(Loc[0], "invalid CPY instruction, destination and source"
4987  " registers are the same");
4988  if (Xd == Xn)
4989  return Error(Loc[0], "invalid CPY instruction, destination and size"
4990  " registers are the same");
4991  if (Xs == Xn)
4992  return Error(Loc[0], "invalid CPY instruction, source and size"
4993  " registers are the same");
4994  break;
4995  }
4996  case AArch64::SETP:
4997  case AArch64::SETPT:
4998  case AArch64::SETPN:
4999  case AArch64::SETPTN:
5000  case AArch64::SETM:
5001  case AArch64::SETMT:
5002  case AArch64::SETMN:
5003  case AArch64::SETMTN:
5004  case AArch64::SETE:
5005  case AArch64::SETET:
5006  case AArch64::SETEN:
5007  case AArch64::SETETN:
5008  case AArch64::SETGP:
5009  case AArch64::SETGPT:
5010  case AArch64::SETGPN:
5011  case AArch64::SETGPTN:
5012  case AArch64::SETGM:
5013  case AArch64::SETGMT:
5014  case AArch64::SETGMN:
5015  case AArch64::SETGMTN:
5016  case AArch64::MOPSSETGE:
5017  case AArch64::MOPSSETGET:
5018  case AArch64::MOPSSETGEN:
5019  case AArch64::MOPSSETGETN: {
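    // The MOPS SET*/SETG* memset instructions write back their destination
    // (Xd) and size (Xn) registers, while Xm supplies the fill value; the
    // three registers must be pairwise distinct. Illustrative assembly
    // (hypothetical operands):
    //   setp [x0]!, x1!, x2    // OK
    //   setp [x0]!, x0!, x2    // rejected: destination == size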
5020  unsigned Xd_wb = Inst.getOperand(0).getReg();
5021  unsigned Xn_wb = Inst.getOperand(1).getReg();
5022  unsigned Xd = Inst.getOperand(2).getReg();
5023  unsigned Xn = Inst.getOperand(3).getReg();
5024  unsigned Xm = Inst.getOperand(4).getReg();
5025  if (Xd_wb != Xd)
5026  return Error(Loc[0],
5027  "invalid SET instruction, Xd_wb and Xd do not match");
5028  if (Xn_wb != Xn)
5029  return Error(Loc[0],
5030  "invalid SET instruction, Xn_wb and Xn do not match");
5031  if (Xd == Xn)
5032  return Error(Loc[0], "invalid SET instruction, destination and size"
5033  " registers are the same");
5034  if (Xd == Xm)
5035  return Error(Loc[0], "invalid SET instruction, destination and source"
5036  " registers are the same");
5037  if (Xn == Xm)
5038  return Error(Loc[0], "invalid SET instruction, source and size"
5039  " registers are the same");
5040  break;
5041  }
5042  }
5043 
5044  // Now check immediate ranges. This is kept separate from the switch
5045  // above because the two sets of instructions overlap, and splitting the
5046  // checks keeps the nested conditionals to a minimum.
5047  switch (Inst.getOpcode()) {
5048  case AArch64::ADDSWri:
5049  case AArch64::ADDSXri:
5050  case AArch64::ADDWri:
5051  case AArch64::ADDXri:
5052  case AArch64::SUBSWri:
5053  case AArch64::SUBSXri:
5054  case AArch64::SUBWri:
5055  case AArch64::SUBXri: {
5056  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5057  // some slight duplication here.
5058  if (Inst.getOperand(2).isExpr()) {
5059  const MCExpr *Expr = Inst.getOperand(2).getExpr();
5060  AArch64MCExpr::VariantKind ELFRefKind;
5061  MCSymbolRefExpr::VariantKind DarwinRefKind;
5062  int64_t Addend;
5063  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5064 
5065  // Only allow these with ADDXri.
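      // A page-offset reference such as '_var@PAGEOFF' ('_var' being a
      // hypothetical symbol) computes part of a 64-bit address, so it is
      // rejected for the 32-bit forms.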
5066  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5067  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5068  Inst.getOpcode() == AArch64::ADDXri)
5069  return false;
5070 
5071  // Only allow these with ADDXri/ADDWri.
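      // e.g. 'add x0, x0, :lo12:sym' materializing the low 12 bits of an
      // adrp-based address.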
5072  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5073  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5074  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5075  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5076  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5077  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5078  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5079  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5080  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5081  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5082  (Inst.getOpcode() == AArch64::ADDXri ||
5083  Inst.getOpcode() == AArch64::ADDWri))
5084  return false;
5085 
5086  // Otherwise, don't allow symbol refs in the immediate field.
5087  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5088  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5089  // 'cmp w0, borked').
5090  return Error(Loc.back(), "invalid immediate expression");
5091  }
5092  // We don't validate more complex expressions here
5093  }
5094  return false;
5095  }
5096  default:
5097  return false;
5098  }
5099 }
5100 
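// Forward declaration; the definition is generated by TableGen and pulled in
// with the rest of the generated matcher (AArch64GenAsmMatcher.inc).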
5101 static std::string AArch64MnemonicSpellCheck(StringRef S,
5102  const FeatureBitset &FBS,
5103  unsigned VariantID = 0);
5104 
5105 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5106                                       uint64_t ErrorInfo,
5107                                       OperandVector &Operands) {
5108  switch (ErrCode) {
5109  case Match_InvalidTiedOperand: {
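  // A tied operand must name the same architectural register as the
  // destination; the constraint says whether it must be the identical
  // register or its 32-bit (W) / 64-bit (X) counterpart.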
5110  RegConstraintEqualityTy EqTy =
5111  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
5112  .getRegEqualityTy();
5113  switch (EqTy) {
5114  case RegConstraintEqualityTy::EqualsSubReg:
5115  return Error(Loc, "operand must be 64-bit form of destination register");
5116  case RegConstraintEqualityTy::EqualsSuperReg:
5117  return Error(Loc, "operand must be 32-bit form of destination register");
5118  case RegConstraintEqualityTy::EqualsReg:
5119  return Error(Loc, "operand must match destination register");
5120  }
5121  llvm_unreachable("Unknown RegConstraintEqualityTy");
5122  }
5123  case Match_MissingFeature:
5124  return Error(Loc,
5125  "instruction requires a CPU feature not currently enabled");
5126  case Match_InvalidOperand:
5127  return Error(Loc, "invalid operand for instruction");
5128  case Match_InvalidSuffix:
5129  return Error(Loc, "invalid type suffix for instruction");
5130  case Match_InvalidCondCode:
5131  return Error(Loc, "expected AArch64 condition code");
5132  case Match_AddSubRegExtendSmall:
5133  return Error(Loc,
5134  "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5135  case Match_AddSubRegExtendLarge:
5136  return Error(Loc,
5137  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5138  case Match_AddSubSecondSource:
5139  return Error(Loc,
5140  "expected compatible register, symbol or integer in range [0, 4095]");
5141  case Match_LogicalSecondSource:
5142  return Error(Loc, "expected compatible register or logical immediate");
5143  case Match_InvalidMovImm32Shift:
5144  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5145  case Match_InvalidMovImm64Shift:
5146  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5147  case Match_AddSubRegShift32:
5148  return Error(Loc,
5149  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5150  case Match_AddSubRegShift64:
5151  return Error(Loc,
5152  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5153  case Match_InvalidFPImm:
5154  return Error(Loc,
5155  "expected compatible register or floating-point constant");
5156  case Match_InvalidMemoryIndexedSImm6:
5157  return Error(Loc, "index must be an integer in range [-32, 31].");
5158  case Match_InvalidMemoryIndexedSImm5:
5159  return Error(Loc, "index must be an integer in range [-16, 15].");
5160  case Match_InvalidMemoryIndexed1SImm4:
5161  return Error(Loc, "index must be an integer in range [-8, 7].");
5162  case Match_InvalidMemoryIndexed2SImm4:
5163  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5164  case Match_InvalidMemoryIndexed3SImm4:
5165  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5166  case Match_InvalidMemoryIndexed4SImm4:
5167  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5168  case Match_InvalidMemoryIndexed16SImm4:
5169  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5170  case Match_InvalidMemoryIndexed32SImm4:
5171  return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5172  case Match_InvalidMemoryIndexed1SImm6:
5173  return Error(Loc, "index must be an integer in range [-32, 31].");
5174  case Match_InvalidMemoryIndexedSImm8:
5175  return Error(Loc, "index must be an integer in range [-128, 127].");
5176  case Match_InvalidMemoryIndexedSImm9:
5177  return Error(Loc, "index must be an integer in range [-256, 255].");
5178  case Match_InvalidMemoryIndexed16SImm9:
5179  return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5180  case Match_InvalidMemoryIndexed8SImm10:
5181  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5182  case Match_InvalidMemoryIndexed4SImm7:
5183  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5184  case Match_InvalidMemoryIndexed8SImm7:
5185  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5186  case Match_InvalidMemoryIndexed16SImm7:
5187  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5188  case Match_InvalidMemoryIndexed8UImm5:
5189  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5190  case Match_InvalidMemoryIndexed4UImm5:
5191  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5192  case Match_InvalidMemoryIndexed2UImm5:
5193  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5194  case Match_InvalidMemoryIndexed8UImm6:
5195  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5196  case Match_InvalidMemoryIndexed16UImm6:
5197  return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5198  case Match_InvalidMemoryIndexed4UImm6:
5199  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5200