LLVM  14.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64InstrInfo.h"
16 #include "Utils/AArch64BaseInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
38 #include "llvm/MC/MCRegisterInfo.h"
39 #include "llvm/MC/MCStreamer.h"
41 #include "llvm/MC/MCSymbol.h"
43 #include "llvm/MC/MCValue.h"
45 #include "llvm/MC/TargetRegistry.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/SMLoc.h"
53 #include <cassert>
54 #include <cctype>
55 #include <cstdint>
56 #include <cstdio>
57 #include <string>
58 #include <tuple>
59 #include <utility>
60 #include <vector>
61 
62 using namespace llvm;
63 
64 namespace {
65 
/// Kinds of register operand the AArch64 assembly parser distinguishes.
/// (Scrape-artifact line numbers removed from the original listing.)
enum class RegKind {
  Scalar,             ///< A scalar register operand.
  NeonVector,         ///< A NEON vector register operand.
  SVEDataVector,      ///< An SVE data vector (Z) register operand.
  SVEPredicateVector, ///< An SVE predicate (P) register operand.
  Matrix              ///< An SME matrix register operand.
};
73 
/// The flavours of SME matrix operand: the whole array, a tile, or a single
/// tile row/column. (Scrape-artifact line number removed.)
enum class MatrixKind { Array, Tile, Row, Col };
75 
/// How a register operand must relate to the register class it is matched
/// against: the same register, or its super-/sub-register (used e.g. for the
/// GPR64as32 / GPR32as64 operand classes, see RegOp::EqualityTy below).
/// (Scrape-artifact line numbers removed from the original listing.)
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
81 
// NOTE(review): this listing is a documentation scrape — the leading number on
// each line and several dropped lines (original lines 87, 175, 220, 222-223,
// 237, 242, 251, 282, 285) are extraction artifacts, not source edits.
82 class AArch64AsmParser : public MCTargetAsmParser {
83 private:
84  StringRef Mnemonic; ///< Instruction mnemonic.
85 
86  // Map of register aliases registers via the .req directive.
88 
  // NOTE(review): line 87 (the .req alias map declaration the comment above
  // refers to) was dropped by the scrape — confirm against upstream.
  // PrefixInfo records the state established by a MOVPRFX instruction so the
  // instruction that follows it can be validated against it.
89  class PrefixInfo {
90  public:
  // Build a PrefixInfo from an emitted instruction; only the MOVPRFX
  // opcodes populate it, every other opcode yields an inactive prefix.
91  static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
92  PrefixInfo Prefix;
93  switch (Inst.getOpcode()) {
94  case AArch64::MOVPRFX_ZZ:
95  Prefix.Active = true;
96  Prefix.Dst = Inst.getOperand(0).getReg();
97  break;
98  case AArch64::MOVPRFX_ZPmZ_B:
99  case AArch64::MOVPRFX_ZPmZ_H:
100  case AArch64::MOVPRFX_ZPmZ_S:
101  case AArch64::MOVPRFX_ZPmZ_D:
102  Prefix.Active = true;
103  Prefix.Predicated = true;
104  Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
105  assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
106  "No destructive element size set for movprfx");
107  Prefix.Dst = Inst.getOperand(0).getReg();
108  Prefix.Pg = Inst.getOperand(2).getReg();
109  break;
110  case AArch64::MOVPRFX_ZPzZ_B:
111  case AArch64::MOVPRFX_ZPzZ_H:
112  case AArch64::MOVPRFX_ZPzZ_S:
113  case AArch64::MOVPRFX_ZPzZ_D:
114  Prefix.Active = true;
115  Prefix.Predicated = true;
116  Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
117  assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
118  "No destructive element size set for movprfx");
119  Prefix.Dst = Inst.getOperand(0).getReg();
  // Zeroing form: the governing predicate is operand 1 (merging form
  // above reads it from operand 2).
120  Prefix.Pg = Inst.getOperand(1).getReg();
121  break;
122  default:
123  break;
124  }
125 
126  return Prefix;
127  }
128 
129  PrefixInfo() : Active(false), Predicated(false) {}
130  bool isActive() const { return Active; }
131  bool isPredicated() const { return Predicated; }
  // ElementSize/Pg are only meaningful for the predicated MOVPRFX forms,
  // hence the asserts.
132  unsigned getElementSize() const {
133  assert(Predicated);
134  return ElementSize;
135  }
136  unsigned getDstReg() const { return Dst; }
137  unsigned getPgReg() const {
138  assert(Predicated);
139  return Pg;
140  }
141 
142  private:
143  bool Active;
144  bool Predicated;
145  unsigned ElementSize;
146  unsigned Dst;
147  unsigned Pg;
148  } NextPrefix;
149 
150  AArch64TargetStreamer &getTargetStreamer() {
151  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
152  return static_cast<AArch64TargetStreamer &>(TS);
153  }
154 
155  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
156 
  // Operand-parsing helpers (definitions appear later in the file).
157  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
158  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
159  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
160  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
161  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
162  bool parseRegister(OperandVector &Operands);
163  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
164  bool parseNeonVectorList(OperandVector &Operands);
165  bool parseOptionalMulOperand(OperandVector &Operands);
166  bool parseKeywordOperand(OperandVector &Operands);
167  bool parseOperand(OperandVector &Operands, bool isCondCode,
168  bool invertCondCode);
169  bool parseImmExpr(int64_t &Out);
170  bool parseComma();
171  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
172  unsigned Last);
173 
  // NOTE(review): line 175 (the remainder of this parameter list) was
  // dropped by the scrape.
174  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
176 
  // Assembler-directive handlers.
177  bool parseDirectiveArch(SMLoc L);
178  bool parseDirectiveArchExtension(SMLoc L);
179  bool parseDirectiveCPU(SMLoc L);
180  bool parseDirectiveInst(SMLoc L);
181 
182  bool parseDirectiveTLSDescCall(SMLoc L);
183 
184  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
185  bool parseDirectiveLtorg(SMLoc L);
186 
187  bool parseDirectiveReq(StringRef Name, SMLoc L);
188  bool parseDirectiveUnreq(SMLoc L);
189  bool parseDirectiveCFINegateRAState();
190  bool parseDirectiveCFIBKeyFrame();
191 
192  bool parseDirectiveVariantPCS(SMLoc L);
193 
  // Windows SEH unwind directives.
194  bool parseDirectiveSEHAllocStack(SMLoc L);
195  bool parseDirectiveSEHPrologEnd(SMLoc L);
196  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
197  bool parseDirectiveSEHSaveFPLR(SMLoc L);
198  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
199  bool parseDirectiveSEHSaveReg(SMLoc L);
200  bool parseDirectiveSEHSaveRegX(SMLoc L);
201  bool parseDirectiveSEHSaveRegP(SMLoc L);
202  bool parseDirectiveSEHSaveRegPX(SMLoc L);
203  bool parseDirectiveSEHSaveLRPair(SMLoc L);
204  bool parseDirectiveSEHSaveFReg(SMLoc L);
205  bool parseDirectiveSEHSaveFRegX(SMLoc L);
206  bool parseDirectiveSEHSaveFRegP(SMLoc L);
207  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
208  bool parseDirectiveSEHSetFP(SMLoc L);
209  bool parseDirectiveSEHAddFP(SMLoc L);
210  bool parseDirectiveSEHNop(SMLoc L);
211  bool parseDirectiveSEHSaveNext(SMLoc L);
212  bool parseDirectiveSEHEpilogStart(SMLoc L);
213  bool parseDirectiveSEHEpilogEnd(SMLoc L);
214  bool parseDirectiveSEHTrapFrame(SMLoc L);
215  bool parseDirectiveSEHMachineFrame(SMLoc L);
216  bool parseDirectiveSEHContext(SMLoc L);
217  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
218 
  // NOTE(review): lines 220 and 222-223 (parameter-list continuations of the
  // two declarations below) were dropped by the scrape.
219  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
221  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
224  bool MatchingInlineAsm) override;
225 /// @name Auto-generated Match Functions
226 /// {
227 
228 #define GET_ASSEMBLER_HEADER
229 #include "AArch64GenAsmMatcher.inc"
230 
231  /// }
232 
  // Custom operand parsers invoked by the generated matcher.
  // NOTE(review): lines 237, 242 and 251 (three tryParse* declarations) were
  // dropped by the scrape.
233  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
234  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
235  RegKind MatchKind);
236  OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
238  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
239  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
240  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
241  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
243  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
244  template <bool IsSVEPrefetch = false>
245  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
246  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
247  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
248  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
249  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
250  template<bool AddFPZeroAsLiteral>
252  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
253  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
254  bool tryParseNeonVectorRegister(OperandVector &Operands);
255  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
256  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
257  template <bool ParseShiftExtend,
258  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
259  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
260  template <bool ParseShiftExtend, bool ParseSuffix>
261  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
262  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
263  template <RegKind VectorKind>
264  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
265  bool ExpectMatch = false);
266  OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
267  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
268  OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
269 
270 public:
271  enum AArch64MatchResultTy {
272  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
273 #define GET_OPERAND_DIAGNOSTIC_TYPES
274 #include "AArch64GenAsmMatcher.inc"
275  };
276  bool IsILP32;
277 
278  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
279  const MCInstrInfo &MII, const MCTargetOptions &Options)
280  : MCTargetAsmParser(Options, STI, MII) {
281  IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
283  MCStreamer &S = getParser().getStreamer();
  // NOTE(review): line 285 (the body of this 'if' — presumably installing a
  // target streamer on S) was dropped by the scrape; confirm upstream.
284  if (S.getTargetStreamer() == nullptr)
286 
287  // Alias .hword/.word/.[dx]word to the target-independent
288  // .2byte/.4byte/.8byte directives as they have the same form and
289  // semantics:
290  /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
291  Parser.addAliasForDirective(".hword", ".2byte");
292  Parser.addAliasForDirective(".word", ".4byte");
293  Parser.addAliasForDirective(".dword", ".8byte");
294  Parser.addAliasForDirective(".xword", ".8byte");
295 
296  // Initialize the set of available features.
297  setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
298  }
299 
  // MCTargetAsmParser interface.
300  bool regsEqual(const MCParsedAsmOperand &Op1,
301  const MCParsedAsmOperand &Op2) const override;
302  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
303  SMLoc NameLoc, OperandVector &Operands) override;
304  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
305  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
306  SMLoc &EndLoc) override;
307  bool ParseDirective(AsmToken DirectiveID) override;
308  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
309  unsigned Kind) override;
310 
  // Classify a symbolic expression into its ELF/Darwin relocation modifier
  // plus a constant addend; used by the is*Symbol* operand predicates below.
311  static bool classifySymbolRef(const MCExpr *Expr,
312  AArch64MCExpr::VariantKind &ELFRefKind,
313  MCSymbolRefExpr::VariantKind &DarwinRefKind,
314  int64_t &Addend);
315 };
316 
317 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
318 /// instruction.
319 class AArch64Operand : public MCParsedAsmOperand {
320 private:
  // Discriminator selecting which member of the union below is active.
321  enum KindTy {
322  k_Immediate,
323  k_ShiftedImm,
324  k_CondCode,
325  k_Register,
326  k_MatrixRegister,
327  k_MatrixTileList,
328  k_SVCR,
329  k_VectorList,
330  k_VectorIndex,
331  k_Token,
332  k_SysReg,
333  k_SysCR,
334  k_Prefetch,
335  k_ShiftExtend,
336  k_FPImm,
337  k_Barrier,
338  k_PSBHint,
339  k_BTIHint,
340  } Kind;
341 
342  SMLoc StartLoc, EndLoc;
343 
344  struct TokOp {
345  const char *Data;
346  unsigned Length;
347  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
348  };
349 
350  // Separate shift/extend operand.
  // NOTE(review): line 352 (the first field of this struct — presumably the
  // shift/extend type, given the ShiftExtend.Type reads below) was dropped
  // by the scrape.
351  struct ShiftExtendOp {
353  unsigned Amount;
354  bool HasExplicitAmount;
355  };
356 
357  struct RegOp {
358  unsigned RegNum;
359  RegKind Kind;
360  int ElementWidth;
361 
362  // The register may be allowed as a different register class,
363  // e.g. for GPR64as32 or GPR32as64.
364  RegConstraintEqualityTy EqualityTy;
365 
366  // In some cases the shift/extend needs to be explicitly parsed together
367  // with the register, rather than as a separate operand. This is needed
368  // for addressing modes where the instruction as a whole dictates the
369  // scaling/extend, rather than specific bits in the instruction.
370  // By parsing them as a single operand, we avoid the need to pass an
371  // extra operand in all CodeGen patterns (because all operands need to
372  // have an associated value), and we avoid the need to update TableGen to
373  // accept operands that have no associated bits in the instruction.
374  //
375  // An added benefit of parsing them together is that the assembler
376  // can give a sensible diagnostic if the scaling is not correct.
377  //
378  // The default is 'lsl #0' (HasExplicitAmount = false) if no
379  // ShiftExtend is specified.
380  ShiftExtendOp ShiftExtend;
381  };
382 
383  struct MatrixRegOp {
384  unsigned RegNum;
385  unsigned ElementWidth;
386  MatrixKind Kind;
387  };
388 
389  struct MatrixTileListOp {
390  unsigned RegMask = 0;
391  };
392 
393  struct VectorListOp {
394  unsigned RegNum;
395  unsigned Count;
396  unsigned NumElements;
397  unsigned ElementWidth;
398  RegKind RegisterKind;
399  };
400 
401  struct VectorIndexOp {
402  int Val;
403  };
404 
405  struct ImmOp {
406  const MCExpr *Val;
407  };
408 
409  struct ShiftedImmOp {
410  const MCExpr *Val;
411  unsigned ShiftAmount;
412  };
413 
  // NOTE(review): line 415 (this struct's field — presumably the condition
  // code, given the CondCode.Code read in getCondCode below) was dropped by
  // the scrape.
414  struct CondCodeOp {
416  };
417 
418  struct FPImmOp {
419  uint64_t Val; // APFloat value bitcasted to uint64_t.
420  bool IsExact; // describes whether parsed value was exact.
421  };
422 
423  struct BarrierOp {
424  const char *Data;
425  unsigned Length;
426  unsigned Val; // Not the enum since not all values have names.
427  bool HasnXSModifier;
428  };
429 
430  struct SysRegOp {
431  const char *Data;
432  unsigned Length;
433  uint32_t MRSReg;
434  uint32_t MSRReg;
435  uint32_t PStateField;
436  };
437 
438  struct SysCRImmOp {
439  unsigned Val;
440  };
441 
442  struct PrefetchOp {
443  const char *Data;
444  unsigned Length;
445  unsigned Val;
446  };
447 
448  struct PSBHintOp {
449  const char *Data;
450  unsigned Length;
451  unsigned Val;
452  };
453 
454  struct BTIHintOp {
455  const char *Data;
456  unsigned Length;
457  unsigned Val;
458  };
459 
460  struct SVCROp {
461  const char *Data;
462  unsigned Length;
463  unsigned PStateField;
464  };
465 
  // Storage for the operand's value; the active member is the one selected
  // by Kind.
466  union {
467  struct TokOp Tok;
468  struct RegOp Reg;
469  struct MatrixRegOp MatrixReg;
470  struct MatrixTileListOp MatrixTileList;
471  struct VectorListOp VectorList;
472  struct VectorIndexOp VectorIndex;
473  struct ImmOp Imm;
474  struct ShiftedImmOp ShiftedImm;
475  struct CondCodeOp CondCode;
476  struct FPImmOp FPImm;
477  struct BarrierOp Barrier;
478  struct SysRegOp SysReg;
479  struct SysCRImmOp SysCRImm;
480  struct PrefetchOp Prefetch;
481  struct PSBHintOp PSBHint;
482  struct BTIHintOp BTIHint;
483  struct ShiftExtendOp ShiftExtend;
484  struct SVCROp SVCR;
485  };
486 
487  // Keep the MCContext around as the MCExprs may need manipulated during
488  // the add<>Operands() calls.
489  MCContext &Ctx;
490 
491 public:
492  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
493 
  // Copy constructor: copies only the union member that is active for
  // o.Kind, so no inactive member is ever read.
494  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
495  Kind = o.Kind;
496  StartLoc = o.StartLoc;
497  EndLoc = o.EndLoc;
498  switch (Kind) {
499  case k_Token:
500  Tok = o.Tok;
501  break;
502  case k_Immediate:
503  Imm = o.Imm;
504  break;
505  case k_ShiftedImm:
506  ShiftedImm = o.ShiftedImm;
507  break;
508  case k_CondCode:
509  CondCode = o.CondCode;
510  break;
511  case k_FPImm:
512  FPImm = o.FPImm;
513  break;
514  case k_Barrier:
515  Barrier = o.Barrier;
516  break;
517  case k_Register:
518  Reg = o.Reg;
519  break;
520  case k_MatrixRegister:
521  MatrixReg = o.MatrixReg;
522  break;
523  case k_MatrixTileList:
524  MatrixTileList = o.MatrixTileList;
525  break;
526  case k_VectorList:
527  VectorList = o.VectorList;
528  break;
529  case k_VectorIndex:
530  VectorIndex = o.VectorIndex;
531  break;
532  case k_SysReg:
533  SysReg = o.SysReg;
534  break;
535  case k_SysCR:
536  SysCRImm = o.SysCRImm;
537  break;
538  case k_Prefetch:
539  Prefetch = o.Prefetch;
540  break;
541  case k_PSBHint:
542  PSBHint = o.PSBHint;
543  break;
544  case k_BTIHint:
545  BTIHint = o.BTIHint;
546  break;
547  case k_ShiftExtend:
548  ShiftExtend = o.ShiftExtend;
549  break;
550  case k_SVCR:
551  SVCR = o.SVCR;
552  break;
553  }
554  }
555 
556  /// getStartLoc - Get the location of the first token of this operand.
557  SMLoc getStartLoc() const override { return StartLoc; }
558  /// getEndLoc - Get the location of the last token of this operand.
559  SMLoc getEndLoc() const override { return EndLoc; }
560 
  // Typed accessors. Each asserts that this operand's Kind matches before
  // reading the corresponding union member.
561  StringRef getToken() const {
562  assert(Kind == k_Token && "Invalid access!");
563  return StringRef(Tok.Data, Tok.Length);
564  }
565 
566  bool isTokenSuffix() const {
567  assert(Kind == k_Token && "Invalid access!");
568  return Tok.IsSuffix;
569  }
570 
571  const MCExpr *getImm() const {
572  assert(Kind == k_Immediate && "Invalid access!");
573  return Imm.Val;
574  }
575 
576  const MCExpr *getShiftedImmVal() const {
577  assert(Kind == k_ShiftedImm && "Invalid access!");
578  return ShiftedImm.Val;
579  }
580 
581  unsigned getShiftedImmShift() const {
582  assert(Kind == k_ShiftedImm && "Invalid access!");
583  return ShiftedImm.ShiftAmount;
584  }
585 
  // NOTE(review): line 586 (this accessor's signature — presumably
  // getCondCode, returning CondCode.Code) was dropped by the scrape.
587  assert(Kind == k_CondCode && "Invalid access!");
588  return CondCode.Code;
589  }
590 
  // Reconstitute the stored bit pattern as an IEEE double APFloat.
591  APFloat getFPImm() const {
592  assert (Kind == k_FPImm && "Invalid access!");
593  return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
594  }
595 
596  bool getFPImmIsExact() const {
597  assert (Kind == k_FPImm && "Invalid access!");
598  return FPImm.IsExact;
599  }
600 
601  unsigned getBarrier() const {
602  assert(Kind == k_Barrier && "Invalid access!");
603  return Barrier.Val;
604  }
605 
606  StringRef getBarrierName() const {
607  assert(Kind == k_Barrier && "Invalid access!");
608  return StringRef(Barrier.Data, Barrier.Length);
609  }
610 
611  bool getBarriernXSModifier() const {
612  assert(Kind == k_Barrier && "Invalid access!");
613  return Barrier.HasnXSModifier;
614  }
615 
616  unsigned getReg() const override {
617  assert(Kind == k_Register && "Invalid access!");
618  return Reg.RegNum;
619  }
620 
621  unsigned getMatrixReg() const {
622  assert(Kind == k_MatrixRegister && "Invalid access!");
623  return MatrixReg.RegNum;
624  }
625 
626  unsigned getMatrixElementWidth() const {
627  assert(Kind == k_MatrixRegister && "Invalid access!");
628  return MatrixReg.ElementWidth;
629  }
630 
631  MatrixKind getMatrixKind() const {
632  assert(Kind == k_MatrixRegister && "Invalid access!");
633  return MatrixReg.Kind;
634  }
635 
636  unsigned getMatrixTileListRegMask() const {
637  assert(isMatrixTileList() && "Invalid access!");
638  return MatrixTileList.RegMask;
639  }
640 
641  RegConstraintEqualityTy getRegEqualityTy() const {
642  assert(Kind == k_Register && "Invalid access!");
643  return Reg.EqualityTy;
644  }
645 
646  unsigned getVectorListStart() const {
647  assert(Kind == k_VectorList && "Invalid access!");
648  return VectorList.RegNum;
649  }
650 
651  unsigned getVectorListCount() const {
652  assert(Kind == k_VectorList && "Invalid access!");
653  return VectorList.Count;
654  }
655 
656  int getVectorIndex() const {
657  assert(Kind == k_VectorIndex && "Invalid access!");
658  return VectorIndex.Val;
659  }
660 
661  StringRef getSysReg() const {
662  assert(Kind == k_SysReg && "Invalid access!");
663  return StringRef(SysReg.Data, SysReg.Length);
664  }
665 
666  unsigned getSysCR() const {
667  assert(Kind == k_SysCR && "Invalid access!");
668  return SysCRImm.Val;
669  }
670 
671  unsigned getPrefetch() const {
672  assert(Kind == k_Prefetch && "Invalid access!");
673  return Prefetch.Val;
674  }
675 
676  unsigned getPSBHint() const {
677  assert(Kind == k_PSBHint && "Invalid access!");
678  return PSBHint.Val;
679  }
680 
681  StringRef getPSBHintName() const {
682  assert(Kind == k_PSBHint && "Invalid access!");
683  return StringRef(PSBHint.Data, PSBHint.Length);
684  }
685 
686  unsigned getBTIHint() const {
687  assert(Kind == k_BTIHint && "Invalid access!");
688  return BTIHint.Val;
689  }
690 
691  StringRef getBTIHintName() const {
692  assert(Kind == k_BTIHint && "Invalid access!");
693  return StringRef(BTIHint.Data, BTIHint.Length);
694  }
695 
696  StringRef getSVCR() const {
697  assert(Kind == k_SVCR && "Invalid access!");
698  return StringRef(SVCR.Data, SVCR.Length);
699  }
700 
701  StringRef getPrefetchName() const {
702  assert(Kind == k_Prefetch && "Invalid access!");
703  return StringRef(Prefetch.Data, Prefetch.Length);
704  }
705 
  // The shift/extend accessors accept either a standalone shift/extend
  // operand or a register operand that carries an embedded shift/extend.
706  AArch64_AM::ShiftExtendType getShiftExtendType() const {
707  if (Kind == k_ShiftExtend)
708  return ShiftExtend.Type;
709  if (Kind == k_Register)
710  return Reg.ShiftExtend.Type;
711  llvm_unreachable("Invalid access!");
712  }
713 
714  unsigned getShiftExtendAmount() const {
715  if (Kind == k_ShiftExtend)
716  return ShiftExtend.Amount;
717  if (Kind == k_Register)
718  return Reg.ShiftExtend.Amount;
719  llvm_unreachable("Invalid access!");
720  }
721 
722  bool hasShiftExtendAmount() const {
723  if (Kind == k_ShiftExtend)
724  return ShiftExtend.HasExplicitAmount;
725  if (Kind == k_Register)
726  return Reg.ShiftExtend.HasExplicitAmount;
727  llvm_unreachable("Invalid access!");
728  }
729 
730  bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 operands are never parsed as generic memory operands.
731  bool isMem() const override { return false; }
732 
  // True for a constant immediate in [0, 64).
733  bool isUImm6() const {
734  if (!isImm())
735  return false;
736  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737  if (!MCE)
738  return false;
739  int64_t Val = MCE->getValue();
740  return (Val >= 0 && Val < 64);
741  }
742 
743  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
744 
745  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
746  return isImmScaled<Bits, Scale>(true);
747  }
748 
749  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
750  return isImmScaled<Bits, Scale>(false);
751  }
752 
  // Shared implementation for the (un)signed scaled-immediate predicates:
  // the value must be a constant, a multiple of Scale, and inside the
  // Bits-wide (signed or unsigned) range scaled by Scale.
  // NOTE(review): lines 756, 760, 774 and 776 (the DiagnosticPredicate
  // return statements of the branches below) were dropped by the scrape.
753  template <int Bits, int Scale>
754  DiagnosticPredicate isImmScaled(bool Signed) const {
755  if (!isImm())
757 
758  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
759  if (!MCE)
761 
762  int64_t MinVal, MaxVal;
763  if (Signed) {
764  int64_t Shift = Bits - 1;
765  MinVal = (int64_t(1) << Shift) * -Scale;
766  MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
767  } else {
768  MinVal = 0;
769  MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
770  }
771 
772  int64_t Val = MCE->getValue();
773  if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
775 
777  }
778 
  // An SVE predicate-pattern immediate is a constant in [0, 32).
  // NOTE(review): lines 781, 784 and 787-788 (the DiagnosticPredicate
  // returns and closing of this function) were dropped by the scrape.
779  DiagnosticPredicate isSVEPattern() const {
780  if (!isImm())
782  auto *MCE = dyn_cast<MCConstantExpr>(getImm());
783  if (!MCE)
785  int64_t Val = MCE->getValue();
786  if (Val >= 0 && Val < 32)
789  }
790 
791  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
792  AArch64MCExpr::VariantKind ELFRefKind;
793  MCSymbolRefExpr::VariantKind DarwinRefKind;
794  int64_t Addend;
795  if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
796  Addend)) {
797  // If we don't understand the expression, assume the best and
798  // let the fixup and relocation code deal with it.
799  return true;
800  }
801 
802  if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
803  ELFRefKind == AArch64MCExpr::VK_LO12 ||
804  ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
805  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
806  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
807  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
808  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
809  ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
810  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
811  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
812  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
813  ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
814  // Note that we don't range-check the addend. It's adjusted modulo page
815  // size when converted, so there is no "out of range" condition when using
816  // @pageoff.
817  return true;
818  } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
819  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
820  // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
821  return Addend == 0;
822  }
823 
824  return false;
825  }
826 
827  template <int Scale> bool isUImm12Offset() const {
828  if (!isImm())
829  return false;
830 
831  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
832  if (!MCE)
833  return isSymbolicUImm12Offset(getImm());
834 
835  int64_t Val = MCE->getValue();
836  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
837  }
838 
839  template <int N, int M>
840  bool isImmInRange() const {
841  if (!isImm())
842  return false;
843  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
844  if (!MCE)
845  return false;
846  int64_t Val = MCE->getValue();
847  return (Val >= N && Val <= M);
848  }
849 
850  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
851  // a logical immediate can always be represented when inverted.
  // True if the constant, truncated to the width of T, is encodable as an
  // AArch64 bitmask ("logical") immediate. The top bits beyond T's width
  // must be all zeros or all ones so the truncation is value-preserving.
852  template <typename T>
853  bool isLogicalImm() const {
854  if (!isImm())
855  return false;
856  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
857  if (!MCE)
858  return false;
859 
860  int64_t Val = MCE->getValue();
861  // Avoid left shift by 64 directly.
862  uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
863  // Allow all-0 or all-1 in top bits to permit bitwise NOT.
864  if ((Val & Upper) && (Val & Upper) != Upper)
865  return false;
866 
867  return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
868  }
869 
870  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
871 
872  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
873  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
874  /// immediate that can be shifted by 'Shift'.
875  template <unsigned Width>
876  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
877  if (isShiftedImm() && Width == getShiftedImmShift())
878  if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
879  return std::make_pair(CE->getValue(), Width);
880 
881  if (isImm())
882  if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
883  int64_t Val = CE->getValue();
  // A plain non-zero constant whose low Width bits are clear can be
  // re-expressed as (Val >> Width, Width); otherwise keep it unshifted.
884  if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
885  return std::make_pair(Val >> Width, Width);
886  else
887  return std::make_pair(Val, 0u);
888  }
889 
890  return {};
891  }
892 
  // True if the operand can be used as an ADD/SUB immediate: either a
  // symbolic low-12-bit/page-offset reference, or a constant expressible as
  // a 12-bit value optionally shifted left by 12.
893  bool isAddSubImm() const {
894  if (!isShiftedImm() && !isImm())
895  return false;
896 
897  const MCExpr *Expr;
898 
899  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
900  if (isShiftedImm()) {
901  unsigned Shift = ShiftedImm.ShiftAmount;
902  Expr = ShiftedImm.Val;
903  if (Shift != 0 && Shift != 12)
904  return false;
905  } else {
906  Expr = getImm();
907  }
908 
  // Symbolic reference: accept the page-offset style modifiers.
909  AArch64MCExpr::VariantKind ELFRefKind;
910  MCSymbolRefExpr::VariantKind DarwinRefKind;
911  int64_t Addend;
912  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
913  DarwinRefKind, Addend)) {
914  return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
915  || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
916  || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
917  || ELFRefKind == AArch64MCExpr::VK_LO12
918  || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
919  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
920  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
921  || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
922  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
923  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
924  || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
925  || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
926  || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
927  }
928 
929  // If it's a constant, it should be a real immediate in range.
930  if (auto ShiftedVal = getShiftedVal<12>())
931  return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
932 
933  // If it's an expression, we hope for the best and let the fixup/relocation
934  // code deal with it.
935  return true;
936  }
937 
938  bool isAddSubImmNeg() const {
939  if (!isShiftedImm() && !isImm())
940  return false;
941 
942  // Otherwise it should be a real negative immediate in range.
943  if (auto ShiftedVal = getShiftedVal<12>())
944  return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
945 
946  return false;
947  }
948 
949  // Signed value in the range -128 to +127. For element widths of
950  // 16 bits or higher it may also be a signed multiple of 256 in the
951  // range -32768 to +32512.
952  // For element-width of 8 bits a range of -128 to 255 is accepted,
953  // since a copy of a byte can be either signed/unsigned.
  // NOTE(review): lines 955, 957, 965 and 967 (this function's signature —
  // presumably isSVECpyImm, given the call in isSVEPreferredLogicalImm
  // below — and its DiagnosticPredicate returns) were dropped by the scrape.
954  template <typename T>
956  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
958 
959  bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
960  std::is_same<int8_t, T>::value;
961  if (auto ShiftedImm = getShiftedVal<8>())
962  if (!(IsByte && ShiftedImm->second) &&
963  AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
964  << ShiftedImm->second))
966 
968  }
969 
970  // Unsigned value in the range 0 to 255. For element widths of
971  // 16 bits or higher it may also be a signed multiple of 256 in the
972  // range 0 to 65280.
  // NOTE(review): lines 975, 983 and 985 (the DiagnosticPredicate return
  // statements) were dropped by the scrape.
973  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
974  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
976 
977  bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
978  std::is_same<int8_t, T>::value;
979  if (auto ShiftedImm = getShiftedVal<8>())
980  if (!(IsByte && ShiftedImm->second) &&
981  AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
982  << ShiftedImm->second))
984 
986  }
987 
  // Prefer the logical-immediate encoding only when the value is not also
  // encodable as an SVE CPY immediate.
  // NOTE(review): lines 990-991 (the return statements) were dropped by
  // the scrape.
988  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
989  if (isLogicalImm<T>() && !isSVECpyImm<T>())
992  }
993 
994  bool isCondCode() const { return Kind == k_CondCode; }
995 
  // NOTE(review): line 1002 (the final return — presumably delegating to an
  // AArch64_AM SIMD-modified-immediate check, judging by the function name)
  // was dropped by the scrape; confirm upstream.
996  bool isSIMDImmType10() const {
997  if (!isImm())
998  return false;
999  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1000  if (!MCE)
1001  return false;
1003  }
1004 
1005  template<int N>
1006  bool isBranchTarget() const {
1007  if (!isImm())
1008  return false;
1009  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1010  if (!MCE)
1011  return true;
1012  int64_t Val = MCE->getValue();
1013  if (Val & 0x3)
1014  return false;
1015  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1016  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1017  }
1018 
1019  bool
1020  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1021  if (!isImm())
1022  return false;
1023 
1024  AArch64MCExpr::VariantKind ELFRefKind;
1025  MCSymbolRefExpr::VariantKind DarwinRefKind;
1026  int64_t Addend;
1027  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1028  DarwinRefKind, Addend)) {
1029  return false;
1030  }
1031  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1032  return false;
1033 
1034  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
1035  if (ELFRefKind == AllowedModifiers[i])
1036  return true;
1037  }
1038 
1039  return false;
1040  }
1041 
  // Per-shift MOVW-with-symbol predicates: each accepts the relocation
  // modifiers valid for that 16-bit chunk of the address.
1042  bool isMovWSymbolG3() const {
1043  return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1044  }
1045 
  // NOTE(review): lines 1048-1051 (the modifier list passed to isMovWSymbol)
  // were dropped by the scrape.
1046  bool isMovWSymbolG2() const {
1047  return isMovWSymbol(
1052  }
1053 
  // NOTE(review): lines 1056-1060 (the modifier list) were dropped by the
  // scrape.
1054  bool isMovWSymbolG1() const {
1055  return isMovWSymbol(
1061  }
1062 
  // NOTE(review): lines 1065-1069 (the modifier list) were dropped by the
  // scrape.
1063  bool isMovWSymbolG0() const {
1064  return isMovWSymbol(
1070  }
1071 
1072  template<int RegWidth, int Shift>
1073  bool isMOVZMovAlias() const {
1074  if (!isImm()) return false;
1075 
1076  const MCExpr *E = getImm();
1077  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1078  uint64_t Value = CE->getValue();
1079 
1080  return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1081  }
1082  // Only supports the case of Shift being 0 if an expression is used as an
1083  // operand
1084  return !Shift && E;
1085  }
1086 
1087  template<int RegWidth, int Shift>
1088  bool isMOVNMovAlias() const {
1089  if (!isImm()) return false;
1090 
1091  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1092  if (!CE) return false;
1093  uint64_t Value = CE->getValue();
1094 
1095  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1096  }
1097 
1098  bool isFPImm() const {
1099  return Kind == k_FPImm &&
1100  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1101  }
1102 
1103  bool isBarrier() const {
1104  return Kind == k_Barrier && !getBarriernXSModifier();
1105  }
1106  bool isBarriernXS() const {
1107  return Kind == k_Barrier && getBarriernXSModifier();
1108  }
1109  bool isSysReg() const { return Kind == k_SysReg; }
1110 
1111  bool isMRSSystemRegister() const {
1112  if (!isSysReg()) return false;
1113 
1114  return SysReg.MRSReg != -1U;
1115  }
1116 
1117  bool isMSRSystemRegister() const {
1118  if (!isSysReg()) return false;
1119  return SysReg.MSRReg != -1U;
1120  }
1121 
1122  bool isSystemPStateFieldWithImm0_1() const {
1123  if (!isSysReg()) return false;
1124  return (SysReg.PStateField == AArch64PState::PAN ||
1125  SysReg.PStateField == AArch64PState::DIT ||
1126  SysReg.PStateField == AArch64PState::UAO ||
1127  SysReg.PStateField == AArch64PState::SSBS);
1128  }
1129 
1130  bool isSystemPStateFieldWithImm0_15() const {
1131  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1132  return SysReg.PStateField != -1U;
1133  }
1134 
1135  bool isSVCR() const {
1136  if (Kind != k_SVCR)
1137  return false;
1138  return SVCR.PStateField != -1U;
1139  }
1140 
1141  bool isReg() const override {
1142  return Kind == k_Register;
1143  }
1144 
1145  bool isScalarReg() const {
1146  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1147  }
1148 
1149  bool isNeonVectorReg() const {
1150  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1151  }
1152 
1153  bool isNeonVectorRegLo() const {
1154  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1155  (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1156  Reg.RegNum) ||
1157  AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1158  Reg.RegNum));
1159  }
1160 
1161  bool isMatrix() const { return Kind == k_MatrixRegister; }
1162  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1163 
1164  template <unsigned Class> bool isSVEVectorReg() const {
1165  RegKind RK;
1166  switch (Class) {
1167  case AArch64::ZPRRegClassID:
1168  case AArch64::ZPR_3bRegClassID:
1169  case AArch64::ZPR_4bRegClassID:
1170  RK = RegKind::SVEDataVector;
1171  break;
1172  case AArch64::PPRRegClassID:
1173  case AArch64::PPR_3bRegClassID:
1174  RK = RegKind::SVEPredicateVector;
1175  break;
1176  default:
1177  llvm_unreachable("Unsupport register class");
1178  }
1179 
1180  return (Kind == k_Register && Reg.Kind == RK) &&
1181  AArch64MCRegisterClasses[Class].contains(getReg());
1182  }
1183 
1184  template <unsigned Class> bool isFPRasZPR() const {
1185  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1186  AArch64MCRegisterClasses[Class].contains(getReg());
1187  }
1188 
1189  template <int ElementWidth, unsigned Class>
1190  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1191  if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1193 
1194  if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1196 
1198  }
1199 
1200  template <int ElementWidth, unsigned Class>
1201  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1202  if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1204 
1205  if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1207 
1209  }
1210 
1211  template <int ElementWidth, unsigned Class,
1212  AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1213  bool ShiftWidthAlwaysSame>
1214  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1215  auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1216  if (!VectorMatch.isMatch())
1218 
1219  // Give a more specific diagnostic when the user has explicitly typed in
1220  // a shift-amount that does not match what is expected, but for which
1221  // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1222  bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1223  if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1224  ShiftExtendTy == AArch64_AM::SXTW) &&
1225  !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1227 
1228  if (MatchShift && ShiftExtendTy == getShiftExtendType())
1230 
1232  }
1233 
1234  bool isGPR32as64() const {
1235  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1236  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1237  }
1238 
1239  bool isGPR64as32() const {
1240  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1241  AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1242  }
1243 
1244  bool isGPR64x8() const {
1245  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1246  AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1247  Reg.RegNum);
1248  }
1249 
1250  bool isWSeqPair() const {
1251  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1252  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1253  Reg.RegNum);
1254  }
1255 
1256  bool isXSeqPair() const {
1257  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1258  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1259  Reg.RegNum);
1260  }
1261 
1262  template<int64_t Angle, int64_t Remainder>
1263  DiagnosticPredicate isComplexRotation() const {
1264  if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1265 
1266  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1267  if (!CE) return DiagnosticPredicateTy::NoMatch;
1268  uint64_t Value = CE->getValue();
1269 
1270  if (Value % Angle == Remainder && Value <= 270)
1273  }
1274 
1275  template <unsigned RegClassID> bool isGPR64() const {
1276  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1277  AArch64MCRegisterClasses[RegClassID].contains(getReg());
1278  }
1279 
1280  template <unsigned RegClassID, int ExtWidth>
1281  DiagnosticPredicate isGPR64WithShiftExtend() const {
1282  if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1284 
1285  if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1286  getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1289  }
1290 
1291  /// Is this a vector list with the type implicit (presumably attached to the
1292  /// instruction itself)?
1293  template <RegKind VectorKind, unsigned NumRegs>
1294  bool isImplicitlyTypedVectorList() const {
1295  return Kind == k_VectorList && VectorList.Count == NumRegs &&
1296  VectorList.NumElements == 0 &&
1297  VectorList.RegisterKind == VectorKind;
1298  }
1299 
1300  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1301  unsigned ElementWidth>
1302  bool isTypedVectorList() const {
1303  if (Kind != k_VectorList)
1304  return false;
1305  if (VectorList.Count != NumRegs)
1306  return false;
1307  if (VectorList.RegisterKind != VectorKind)
1308  return false;
1309  if (VectorList.ElementWidth != ElementWidth)
1310  return false;
1311  return VectorList.NumElements == NumElements;
1312  }
1313 
1314  template <int Min, int Max>
1315  DiagnosticPredicate isVectorIndex() const {
1316  if (Kind != k_VectorIndex)
1318  if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1321  }
1322 
1323  bool isToken() const override { return Kind == k_Token; }
1324 
1325  bool isTokenEqual(StringRef Str) const {
1326  return Kind == k_Token && getToken() == Str;
1327  }
1328  bool isSysCR() const { return Kind == k_SysCR; }
1329  bool isPrefetch() const { return Kind == k_Prefetch; }
1330  bool isPSBHint() const { return Kind == k_PSBHint; }
1331  bool isBTIHint() const { return Kind == k_BTIHint; }
1332  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1333  bool isShifter() const {
1334  if (!isShiftExtend())
1335  return false;
1336 
1337  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1338  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1339  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1340  ST == AArch64_AM::MSL);
1341  }
1342 
1343  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1344  if (Kind != k_FPImm)
1346 
1347  if (getFPImmIsExact()) {
1348  // Lookup the immediate from table of supported immediates.
1349  auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1350  assert(Desc && "Unknown enum value");
1351 
1352  // Calculate its FP value.
1353  APFloat RealVal(APFloat::IEEEdouble());
1354  auto StatusOrErr =
1355  RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1356  if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1357  llvm_unreachable("FP immediate is not exact");
1358 
1359  if (getFPImm().bitwiseIsEqual(RealVal))
1361  }
1362 
1364  }
1365 
1366  template <unsigned ImmA, unsigned ImmB>
1367  DiagnosticPredicate isExactFPImm() const {
1369  if ((Res = isExactFPImm<ImmA>()))
1371  if ((Res = isExactFPImm<ImmB>()))
1373  return Res;
1374  }
1375 
1376  bool isExtend() const {
1377  if (!isShiftExtend())
1378  return false;
1379 
1380  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1381  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1382  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1383  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1384  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1385  ET == AArch64_AM::LSL) &&
1386  getShiftExtendAmount() <= 4;
1387  }
1388 
1389  bool isExtend64() const {
1390  if (!isExtend())
1391  return false;
1392  // Make sure the extend expects a 32-bit source register.
1393  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1394  return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1395  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1396  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1397  }
1398 
1399  bool isExtendLSL64() const {
1400  if (!isExtend())
1401  return false;
1402  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1403  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1404  ET == AArch64_AM::LSL) &&
1405  getShiftExtendAmount() <= 4;
1406  }
1407 
1408  template<int Width> bool isMemXExtend() const {
1409  if (!isExtend())
1410  return false;
1411  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1412  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1413  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1414  getShiftExtendAmount() == 0);
1415  }
1416 
1417  template<int Width> bool isMemWExtend() const {
1418  if (!isExtend())
1419  return false;
1420  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1421  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1422  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1423  getShiftExtendAmount() == 0);
1424  }
1425 
1426  template <unsigned width>
1427  bool isArithmeticShifter() const {
1428  if (!isShifter())
1429  return false;
1430 
1431  // An arithmetic shifter is LSL, LSR, or ASR.
1432  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1433  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1434  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1435  }
1436 
1437  template <unsigned width>
1438  bool isLogicalShifter() const {
1439  if (!isShifter())
1440  return false;
1441 
1442  // A logical shifter is LSL, LSR, ASR or ROR.
1443  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1444  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1445  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1446  getShiftExtendAmount() < width;
1447  }
1448 
1449  bool isMovImm32Shifter() const {
1450  if (!isShifter())
1451  return false;
1452 
1453  // A MOVi shifter is LSL of 0, 16, 32, or 48.
1454  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1455  if (ST != AArch64_AM::LSL)
1456  return false;
1457  uint64_t Val = getShiftExtendAmount();
1458  return (Val == 0 || Val == 16);
1459  }
1460 
1461  bool isMovImm64Shifter() const {
1462  if (!isShifter())
1463  return false;
1464 
1465  // A MOVi shifter is LSL of 0 or 16.
1466  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1467  if (ST != AArch64_AM::LSL)
1468  return false;
1469  uint64_t Val = getShiftExtendAmount();
1470  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1471  }
1472 
1473  bool isLogicalVecShifter() const {
1474  if (!isShifter())
1475  return false;
1476 
1477  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1478  unsigned Shift = getShiftExtendAmount();
1479  return getShiftExtendType() == AArch64_AM::LSL &&
1480  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1481  }
1482 
1483  bool isLogicalVecHalfWordShifter() const {
1484  if (!isLogicalVecShifter())
1485  return false;
1486 
1487  // A logical vector shifter is a left shift by 0 or 8.
1488  unsigned Shift = getShiftExtendAmount();
1489  return getShiftExtendType() == AArch64_AM::LSL &&
1490  (Shift == 0 || Shift == 8);
1491  }
1492 
1493  bool isMoveVecShifter() const {
1494  if (!isShiftExtend())
1495  return false;
1496 
1497  // A logical vector shifter is a left shift by 8 or 16.
1498  unsigned Shift = getShiftExtendAmount();
1499  return getShiftExtendType() == AArch64_AM::MSL &&
1500  (Shift == 8 || Shift == 16);
1501  }
1502 
1503  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1504  // to LDUR/STUR when the offset is not legal for the former but is for
1505  // the latter. As such, in addition to checking for being a legal unscaled
1506  // address, also check that it is not a legal scaled address. This avoids
1507  // ambiguity in the matcher.
1508  template<int Width>
1509  bool isSImm9OffsetFB() const {
1510  return isSImm<9>() && !isUImm12Offset<Width / 8>();
1511  }
1512 
1513  bool isAdrpLabel() const {
1514  // Validation was handled during parsing, so we just sanity check that
1515  // something didn't go haywire.
1516  if (!isImm())
1517  return false;
1518 
1519  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1520  int64_t Val = CE->getValue();
1521  int64_t Min = - (4096 * (1LL << (21 - 1)));
1522  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1523  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1524  }
1525 
1526  return true;
1527  }
1528 
1529  bool isAdrLabel() const {
1530  // Validation was handled during parsing, so we just sanity check that
1531  // something didn't go haywire.
1532  if (!isImm())
1533  return false;
1534 
1535  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1536  int64_t Val = CE->getValue();
1537  int64_t Min = - (1LL << (21 - 1));
1538  int64_t Max = ((1LL << (21 - 1)) - 1);
1539  return Val >= Min && Val <= Max;
1540  }
1541 
1542  return true;
1543  }
1544 
1545  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1546  DiagnosticPredicate isMatrixRegOperand() const {
1547  if (!isMatrix())
1549  if (getMatrixKind() != Kind ||
1550  !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1551  EltSize != getMatrixElementWidth())
1554  }
1555 
1556  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1557  // Add as immediates when possible. Null MCExpr = 0.
1558  if (!Expr)
1560  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1561  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1562  else
1563  Inst.addOperand(MCOperand::createExpr(Expr));
1564  }
1565 
1566  void addRegOperands(MCInst &Inst, unsigned N) const {
1567  assert(N == 1 && "Invalid number of operands!");
1569  }
1570 
1571  void addMatrixOperands(MCInst &Inst, unsigned N) const {
1572  assert(N == 1 && "Invalid number of operands!");
1573  Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1574  }
1575 
1576  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1577  assert(N == 1 && "Invalid number of operands!");
1578  assert(
1579  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1580 
1581  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1582  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1583  RI->getEncodingValue(getReg()));
1584 
1586  }
1587 
1588  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1589  assert(N == 1 && "Invalid number of operands!");
1590  assert(
1591  AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1592 
1593  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1594  uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1595  RI->getEncodingValue(getReg()));
1596 
1598  }
1599 
1600  template <int Width>
1601  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1602  unsigned Base;
1603  switch (Width) {
1604  case 8: Base = AArch64::B0; break;
1605  case 16: Base = AArch64::H0; break;
1606  case 32: Base = AArch64::S0; break;
1607  case 64: Base = AArch64::D0; break;
1608  case 128: Base = AArch64::Q0; break;
1609  default:
1610  llvm_unreachable("Unsupported width");
1611  }
1612  Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1613  }
1614 
1615  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1616  assert(N == 1 && "Invalid number of operands!");
1617  assert(
1618  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1619  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1620  }
1621 
1622  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1623  assert(N == 1 && "Invalid number of operands!");
1624  assert(
1625  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1627  }
1628 
1629  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1630  assert(N == 1 && "Invalid number of operands!");
1632  }
1633 
1634  enum VecListIndexType {
1635  VecListIdx_DReg = 0,
1636  VecListIdx_QReg = 1,
1637  VecListIdx_ZReg = 2,
1638  };
1639 
1640  template <VecListIndexType RegTy, unsigned NumRegs>
1641  void addVectorListOperands(MCInst &Inst, unsigned N) const {
1642  assert(N == 1 && "Invalid number of operands!");
1643  static const unsigned FirstRegs[][5] = {
1644  /* DReg */ { AArch64::Q0,
1645  AArch64::D0, AArch64::D0_D1,
1646  AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1647  /* QReg */ { AArch64::Q0,
1648  AArch64::Q0, AArch64::Q0_Q1,
1649  AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1650  /* ZReg */ { AArch64::Z0,
1651  AArch64::Z0, AArch64::Z0_Z1,
1652  AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1653  };
1654 
1655  assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1656  " NumRegs must be <= 4 for ZRegs");
1657 
1658  unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1659  Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1660  FirstRegs[(unsigned)RegTy][0]));
1661  }
1662 
1663  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1664  assert(N == 1 && "Invalid number of operands!");
1665  unsigned RegMask = getMatrixTileListRegMask();
1666  assert(RegMask <= 0xFF && "Invalid mask!");
1667  Inst.addOperand(MCOperand::createImm(RegMask));
1668  }
1669 
1670  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1671  assert(N == 1 && "Invalid number of operands!");
1672  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1673  }
1674 
1675  template <unsigned ImmIs0, unsigned ImmIs1>
1676  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1677  assert(N == 1 && "Invalid number of operands!");
1678  assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1679  Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1680  }
1681 
1682  void addImmOperands(MCInst &Inst, unsigned N) const {
1683  assert(N == 1 && "Invalid number of operands!");
1684  // If this is a pageoff symrefexpr with an addend, adjust the addend
1685  // to be only the page-offset portion. Otherwise, just add the expr
1686  // as-is.
1687  addExpr(Inst, getImm());
1688  }
1689 
1690  template <int Shift>
1691  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1692  assert(N == 2 && "Invalid number of operands!");
1693  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1694  Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1695  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1696  } else if (isShiftedImm()) {
1697  addExpr(Inst, getShiftedImmVal());
1698  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1699  } else {
1700  addExpr(Inst, getImm());
1702  }
1703  }
1704 
1705  template <int Shift>
1706  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1707  assert(N == 2 && "Invalid number of operands!");
1708  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1709  Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1710  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1711  } else
1712  llvm_unreachable("Not a shifted negative immediate");
1713  }
1714 
1715  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1716  assert(N == 1 && "Invalid number of operands!");
1718  }
1719 
1720  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1721  assert(N == 1 && "Invalid number of operands!");
1722  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1723  if (!MCE)
1724  addExpr(Inst, getImm());
1725  else
1726  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1727  }
1728 
1729  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1730  addImmOperands(Inst, N);
1731  }
1732 
1733  template<int Scale>
1734  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1735  assert(N == 1 && "Invalid number of operands!");
1736  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1737 
1738  if (!MCE) {
1739  Inst.addOperand(MCOperand::createExpr(getImm()));
1740  return;
1741  }
1742  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1743  }
1744 
1745  void addUImm6Operands(MCInst &Inst, unsigned N) const {
1746  assert(N == 1 && "Invalid number of operands!");
1747  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1749  }
1750 
1751  template <int Scale>
1752  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1753  assert(N == 1 && "Invalid number of operands!");
1754  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1755  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1756  }
1757 
1758  template <typename T>
1759  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1760  assert(N == 1 && "Invalid number of operands!");
1761  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1762  std::make_unsigned_t<T> Val = MCE->getValue();
1763  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1764  Inst.addOperand(MCOperand::createImm(encoding));
1765  }
1766 
1767  template <typename T>
1768  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1769  assert(N == 1 && "Invalid number of operands!");
1770  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1771  std::make_unsigned_t<T> Val = ~MCE->getValue();
1772  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1773  Inst.addOperand(MCOperand::createImm(encoding));
1774  }
1775 
1776  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1777  assert(N == 1 && "Invalid number of operands!");
1778  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1780  Inst.addOperand(MCOperand::createImm(encoding));
1781  }
1782 
1783  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1784  // Branch operands don't encode the low bits, so shift them off
1785  // here. If it's a label, however, just put it on directly as there's
1786  // not enough information now to do anything.
1787  assert(N == 1 && "Invalid number of operands!");
1788  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1789  if (!MCE) {
1790  addExpr(Inst, getImm());
1791  return;
1792  }
1793  assert(MCE && "Invalid constant immediate operand!");
1794  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1795  }
1796 
1797  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1798  // Branch operands don't encode the low bits, so shift them off
1799  // here. If it's a label, however, just put it on directly as there's
1800  // not enough information now to do anything.
1801  assert(N == 1 && "Invalid number of operands!");
1802  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1803  if (!MCE) {
1804  addExpr(Inst, getImm());
1805  return;
1806  }
1807  assert(MCE && "Invalid constant immediate operand!");
1808  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1809  }
1810 
1811  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1812  // Branch operands don't encode the low bits, so shift them off
1813  // here. If it's a label, however, just put it on directly as there's
1814  // not enough information now to do anything.
1815  assert(N == 1 && "Invalid number of operands!");
1816  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1817  if (!MCE) {
1818  addExpr(Inst, getImm());
1819  return;
1820  }
1821  assert(MCE && "Invalid constant immediate operand!");
1822  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1823  }
1824 
1825  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1826  assert(N == 1 && "Invalid number of operands!");
1828  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1829  }
1830 
1831  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1832  assert(N == 1 && "Invalid number of operands!");
1833  Inst.addOperand(MCOperand::createImm(getBarrier()));
1834  }
1835 
1836  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1837  assert(N == 1 && "Invalid number of operands!");
1838  Inst.addOperand(MCOperand::createImm(getBarrier()));
1839  }
1840 
1841  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1842  assert(N == 1 && "Invalid number of operands!");
1843 
1844  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1845  }
1846 
1847  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1848  assert(N == 1 && "Invalid number of operands!");
1849 
1850  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1851  }
1852 
1853  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1854  assert(N == 1 && "Invalid number of operands!");
1855 
1856  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1857  }
1858 
1859  void addSVCROperands(MCInst &Inst, unsigned N) const {
1860  assert(N == 1 && "Invalid number of operands!");
1861 
1862  Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1863  }
1864 
1865  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1866  assert(N == 1 && "Invalid number of operands!");
1867 
1868  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1869  }
1870 
1871  void addSysCROperands(MCInst &Inst, unsigned N) const {
1872  assert(N == 1 && "Invalid number of operands!");
1873  Inst.addOperand(MCOperand::createImm(getSysCR()));
1874  }
1875 
1876  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1877  assert(N == 1 && "Invalid number of operands!");
1878  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1879  }
1880 
1881  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1882  assert(N == 1 && "Invalid number of operands!");
1883  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1884  }
1885 
1886  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1887  assert(N == 1 && "Invalid number of operands!");
1888  Inst.addOperand(MCOperand::createImm(getBTIHint()));
1889  }
1890 
1891  void addShifterOperands(MCInst &Inst, unsigned N) const {
1892  assert(N == 1 && "Invalid number of operands!");
1893  unsigned Imm =
1894  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1895  Inst.addOperand(MCOperand::createImm(Imm));
1896  }
1897 
1898  void addExtendOperands(MCInst &Inst, unsigned N) const {
1899  assert(N == 1 && "Invalid number of operands!");
1900  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1901  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1902  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1903  Inst.addOperand(MCOperand::createImm(Imm));
1904  }
1905 
1906  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1907  assert(N == 1 && "Invalid number of operands!");
1908  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1909  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1910  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1911  Inst.addOperand(MCOperand::createImm(Imm));
1912  }
1913 
1914  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1915  assert(N == 2 && "Invalid number of operands!");
1916  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1917  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1918  Inst.addOperand(MCOperand::createImm(IsSigned));
1919  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1920  }
1921 
1922  // For 8-bit load/store instructions with a register offset, both the
1923  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1924  // they're disambiguated by whether the shift was explicit or implicit rather
1925  // than its size.
1926  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1927  assert(N == 2 && "Invalid number of operands!");
1928  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1929  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1930  Inst.addOperand(MCOperand::createImm(IsSigned));
1931  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1932  }
1933 
1934  template<int Shift>
1935  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1936  assert(N == 1 && "Invalid number of operands!");
1937 
1938  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1939  if (CE) {
1940  uint64_t Value = CE->getValue();
1941  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1942  } else {
1943  addExpr(Inst, getImm());
1944  }
1945  }
1946 
1947  template<int Shift>
1948  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1949  assert(N == 1 && "Invalid number of operands!");
1950 
1951  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1952  uint64_t Value = CE->getValue();
1953  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1954  }
1955 
1956  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1957  assert(N == 1 && "Invalid number of operands!");
1958  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1959  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1960  }
1961 
1962  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1963  assert(N == 1 && "Invalid number of operands!");
1964  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1965  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1966  }
1967 
1968  void print(raw_ostream &OS) const override;
1969 
1970  static std::unique_ptr<AArch64Operand>
1971  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1972  auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1973  Op->Tok.Data = Str.data();
1974  Op->Tok.Length = Str.size();
1975  Op->Tok.IsSuffix = IsSuffix;
1976  Op->StartLoc = S;
1977  Op->EndLoc = S;
1978  return Op;
1979  }
1980 
1981  static std::unique_ptr<AArch64Operand>
1982  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1983  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1985  unsigned ShiftAmount = 0,
1986  unsigned HasExplicitAmount = false) {
1987  auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1988  Op->Reg.RegNum = RegNum;
1989  Op->Reg.Kind = Kind;
1990  Op->Reg.ElementWidth = 0;
1991  Op->Reg.EqualityTy = EqTy;
1992  Op->Reg.ShiftExtend.Type = ExtTy;
1993  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1994  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1995  Op->StartLoc = S;
1996  Op->EndLoc = E;
1997  return Op;
1998  }
1999 
2000  static std::unique_ptr<AArch64Operand>
2001  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2002  SMLoc S, SMLoc E, MCContext &Ctx,
2004  unsigned ShiftAmount = 0,
2005  unsigned HasExplicitAmount = false) {
2006  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2007  Kind == RegKind::SVEPredicateVector) &&
2008  "Invalid vector kind");
2009  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2010  HasExplicitAmount);
2011  Op->Reg.ElementWidth = ElementWidth;
2012  return Op;
2013  }
2014 
  // Build a k_VectorList operand: Count consecutive registers starting at
  // RegNum, each viewed as NumElements elements of ElementWidth bits.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
                   MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2029 
  // Build a k_VectorIndex operand (the "[idx]" lane selector).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2038 
  // Build a k_MatrixTileList operand; RegMask is a bitmask of the 64-bit ZA
  // tiles covered by the list (see ComputeRegsForAlias).
  static std::unique_ptr<AArch64Operand>
  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
    Op->MatrixTileList.RegMask = RegMask;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2047 
2048  static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2049  const unsigned ElementWidth) {
2050  static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2051  RegMap = {
2052  {{0, AArch64::ZAB0},
2053  {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2054  AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2055  {{8, AArch64::ZAB0},
2056  {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2057  AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2058  {{16, AArch64::ZAH0},
2059  {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2060  {{16, AArch64::ZAH1},
2061  {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2062  {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2063  {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2064  {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2065  {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2066  };
2067 
2068  if (ElementWidth == 64)
2069  OutRegs.insert(Reg);
2070  else {
2071  std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2072  assert(!Regs.empty() && "Invalid tile or element width!");
2073  for (auto OutReg : Regs)
2074  OutRegs.insert(OutReg);
2075  }
2076  }
2077 
  // Build a k_Immediate operand wrapping an (possibly unresolved) MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2086 
2087  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2088  unsigned ShiftAmount,
2089  SMLoc S, SMLoc E,
2090  MCContext &Ctx) {
2091  auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2092  Op->ShiftedImm .Val = Val;
2093  Op->ShiftedImm.ShiftAmount = ShiftAmount;
2094  Op->StartLoc = S;
2095  Op->EndLoc = E;
2096  return Op;
2097  }
2098 
  // Build a k_CondCode operand (eq, ne, ge, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2107 
  // Build a k_FPImm operand. The float is stored as its raw bit pattern;
  // IsExact records whether the source text was representable exactly.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2117 
  // Build a k_Barrier operand (DSB/DMB/ISB option). HasnXSModifier is set for
  // the FEAT_XS "nXS" barrier variants.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx,
                                                       bool HasnXSModifier) {
    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->Barrier.HasnXSModifier = HasnXSModifier;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2132 
  // Build a k_SysReg operand. A system register name can have distinct
  // encodings for MRS (read) and MSR (write), plus a PSTATE field encoding;
  // callers later pick whichever applies to the instruction.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2148 
  // Build a k_SysCR operand ("cN" field of SYS/AT/TLBI-style instructions).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2157 
2158  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2159  StringRef Str,
2160  SMLoc S,
2161  MCContext &Ctx) {
2162  auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2163  Op->Prefetch.Val = Val;
2164  Op->Barrier.Data = Str.data();
2165  Op->Barrier.Length = Str.size();
2166  Op->StartLoc = S;
2167  Op->EndLoc = S;
2168  return Op;
2169  }
2170 
  // Build a k_PSBHint operand (Profiling Synchronization Barrier hint).
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2183 
  // Build a k_BTIHint operand (Branch Target Identification).
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    // NOTE(review): the |32 appears to fold the BTI base into the HINT
    // immediate space (BTI aliases HINT #32+) -- confirm against the
    // encoding tables before relying on this.
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2196 
  // Build a k_MatrixRegister operand (SME ZA array/tile/row/col).
  static std::unique_ptr<AArch64Operand>
  CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
                       SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
    Op->MatrixReg.RegNum = RegNum;
    Op->MatrixReg.ElementWidth = ElementWidth;
    Op->MatrixReg.Kind = Kind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2208 
  // Build a k_SVCR operand (SME streaming-mode control register for
  // MSR SVCR... forms).
  static std::unique_ptr<AArch64Operand>
  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
    Op->SVCR.PStateField = PStateField;
    Op->SVCR.Data = Str.data();
    Op->SVCR.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2219 
  // Build a k_ShiftExtend operand; HasExplicitAmount distinguishes "uxtw #0"
  // from a bare "uxtw" (matters for 8-bit mem operands, see above).
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2231 };
2232 
2233 } // end anonymous namespace.
2234 
// Debug-print the operand to OS in a "<kind ...>" form (for dumps and
// diagnostics; not assembly output).
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Print the 8-bit tile mask MSB-first as 0/1 digits.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    // A register carrying a shift/extend also prints it via the case below.
    LLVM_FALLTHROUGH;
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2328 
2329 /// @name Auto-generated Match Functions
2330 /// {
2331 
2332 static unsigned MatchRegisterName(StringRef Name);
2333 
2334 /// }
2335 
// Map a NEON vector register name ("v0".."v31", case-insensitive) to its
// register number; NEON vN names resolve to the corresponding Q register.
// Returns 0 for no match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2372 
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  // {-1, -1} is the sentinel for "not a valid suffix".
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1});
    break;
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // Scalable registers only carry an element width; element count is
    // unknown at assembly time, hence 0.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);
}
2430 
2431 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2432  return parseVectorKind(Suffix, VectorKind).hasValue();
2433 }
2434 
// Map an SVE data register name ("z0".."z31", case-insensitive) to its
// register number. Returns 0 for no match.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2471 
// Map an SVE predicate register name ("p0".."p15", case-insensitive) to its
// register number. Returns 0 for no match.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2492 
// Map a ZA tile name as it may appear inside a tile list ("za0.d" etc.,
// case-insensitive) to its register number. Only whole tiles (no h/v slice
// forms) are accepted here. Returns 0 for no match.
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2512 
// Map an SME matrix register name (case-insensitive) to its register number:
// the ZA array itself, whole tiles ("zaN.t"), and horizontal/vertical slice
// spellings ("zaNh.t"/"zaNv.t") which resolve to the same tile registers.
// Returns 0 for no match.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      // Whole tiles.
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      // Horizontal slices -- same underlying tile registers.
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      // Vertical slices -- same underlying tile registers.
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
2611 
2612 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2613  SMLoc &EndLoc) {
2614  return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2615 }
2616 
// MCTargetAsmParser hook: parse a scalar register, reporting the source range
// it occupied. Only scalar (GPR) registers are recognised here.
OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
                                                        SMLoc &StartLoc,
                                                        SMLoc &EndLoc) {
  StartLoc = getLoc();
  auto Res = tryParseScalarRegister(RegNo);
  // EndLoc points at the last character of the consumed token.
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return Res;
}
2625 
// Matches a register name or register alias previously defined by '.req'
// Returns the register number for Name when it belongs to register class
// Kind, otherwise 0. A name that matches a *different* class than requested
// also yields 0 rather than falling through to the next matcher.
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                  RegKind Kind) {
  unsigned RegNum = 0;
  if ((RegNum = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? RegNum : 0;

  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? RegNum : 0;

  if ((RegNum = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? RegNum : 0;

  if ((RegNum = matchMatrixRegName(Name)))
    return Kind == RegKind::Matrix ? RegNum : 0;

  // The parsed register must be of RegKind Scalar
  if ((RegNum = MatchRegisterName(Name)))
    return Kind == RegKind::Scalar ? RegNum : 0;

  // Note: RegNum is necessarily 0 here -- every successful match above
  // returned -- so this branch is always taken.
  if (!RegNum) {
    // Handle a few common aliases of registers.
    // (This inner RegNum intentionally shadows the outer one.)
    if (auto RegNum = StringSwitch<unsigned>(Name.lower())
                          .Case("fp", AArch64::FP)
                          .Case("lr", AArch64::LR)
                          .Case("x31", AArch64::XZR)
                          .Case("w31", AArch64::WZR)
                          .Default(0))
      return Kind == RegKind::Scalar ? RegNum : 0;

    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return 0;

    // set RegNum if the match is the right kind of register
    if (Kind == Entry->getValue().first)
      RegNum = Entry->getValue().second;
  }
  return RegNum;
}
2669 
2670 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2671 /// Identifier when called, and if it is a register name the token is eaten and
2672 /// the register is added to the operand list.
2674 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2675  const AsmToken &Tok = getTok();
2676  if (Tok.isNot(AsmToken::Identifier))
2677  return MatchOperand_NoMatch;
2678 
2679  std::string lowerCase = Tok.getString().lower();
2680  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2681  if (Reg == 0)
2682  return MatchOperand_NoMatch;
2683 
2684  RegNum = Reg;
2685  Lex(); // Eat identifier token.
2686  return MatchOperand_Success;
2687 }
2688 
2689 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2691 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2692  SMLoc S = getLoc();
2693 
2694  if (getTok().isNot(AsmToken::Identifier)) {
2695  Error(S, "Expected cN operand where 0 <= N <= 15");
2696  return MatchOperand_ParseFail;
2697  }
2698 
2699  StringRef Tok = getTok().getIdentifier();
2700  if (Tok[0] != 'c' && Tok[0] != 'C') {
2701  Error(S, "Expected cN operand where 0 <= N <= 15");
2702  return MatchOperand_ParseFail;
2703  }
2704 
2705  uint32_t CRNum;
2706  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2707  if (BadNum || CRNum > 15) {
2708  Error(S, "Expected cN operand where 0 <= N <= 15");
2709  return MatchOperand_ParseFail;
2710  }
2711 
2712  Lex(); // Eat identifier token.
2713  Operands.push_back(
2714  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2715  return MatchOperand_Success;
2716 }
2717 
2718 /// tryParsePrefetch - Try to parse a prefetch operand.
2719 template <bool IsSVEPrefetch>
2721 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2722  SMLoc S = getLoc();
2723  const AsmToken &Tok = getTok();
2724 
2725  auto LookupByName = [](StringRef N) {
2726  if (IsSVEPrefetch) {
2727  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2728  return Optional<unsigned>(Res->Encoding);
2729  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2730  return Optional<unsigned>(Res->Encoding);
2731  return Optional<unsigned>();
2732  };
2733 
2734  auto LookupByEncoding = [](unsigned E) {
2735  if (IsSVEPrefetch) {
2736  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2737  return Optional<StringRef>(Res->Name);
2738  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2739  return Optional<StringRef>(Res->Name);
2740  return Optional<StringRef>();
2741  };
2742  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2743 
2744  // Either an identifier for named values or a 5-bit immediate.
2745  // Eat optional hash.
2746  if (parseOptionalToken(AsmToken::Hash) ||
2747  Tok.is(AsmToken::Integer)) {
2748  const MCExpr *ImmVal;
2749  if (getParser().parseExpression(ImmVal))
2750  return MatchOperand_ParseFail;
2751 
2752  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2753  if (!MCE) {
2754  TokError("immediate value expected for prefetch operand");
2755  return MatchOperand_ParseFail;
2756  }
2757  unsigned prfop = MCE->getValue();
2758  if (prfop > MaxVal) {
2759  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2760  "] expected");
2761  return MatchOperand_ParseFail;
2762  }
2763 
2764  auto PRFM = LookupByEncoding(MCE->getValue());
2765  Operands.push_back(AArch64Operand::CreatePrefetch(
2766  prfop, PRFM.getValueOr(""), S, getContext()));
2767  return MatchOperand_Success;
2768  }
2769 
2770  if (Tok.isNot(AsmToken::Identifier)) {
2771  TokError("prefetch hint expected");
2772  return MatchOperand_ParseFail;
2773  }
2774 
2775  auto PRFM = LookupByName(Tok.getString());
2776  if (!PRFM) {
2777  TokError("prefetch hint expected");
2778  return MatchOperand_ParseFail;
2779  }
2780 
2781  Operands.push_back(AArch64Operand::CreatePrefetch(
2782  *PRFM, Tok.getString(), S, getContext()));
2783  Lex(); // Eat identifier token.
2784  return MatchOperand_Success;
2785 }
2786 
2787 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2789 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2790  SMLoc S = getLoc();
2791  const AsmToken &Tok = getTok();
2792  if (Tok.isNot(AsmToken::Identifier)) {
2793  TokError("invalid operand for instruction");
2794  return MatchOperand_ParseFail;
2795  }
2796 
2797  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2798  if (!PSB) {
2799  TokError("invalid operand for instruction");
2800  return MatchOperand_ParseFail;
2801  }
2802 
2803  Operands.push_back(AArch64Operand::CreatePSBHint(
2804  PSB->Encoding, Tok.getString(), S, getContext()));
2805  Lex(); // Eat identifier token.
2806  return MatchOperand_Success;
2807 }
2808 
2809 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2811 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2812  SMLoc S = getLoc();
2813  const AsmToken &Tok = getTok();
2814  if (Tok.isNot(AsmToken::Identifier)) {
2815  TokError("invalid operand for instruction");
2816  return MatchOperand_ParseFail;
2817  }
2818 
2819  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2820  if (!BTI) {
2821  TokError("invalid operand for instruction");
2822  return MatchOperand_ParseFail;
2823  }
2824 
2825  Operands.push_back(AArch64Operand::CreateBTIHint(
2826  BTI->Encoding, Tok.getString(), S, getContext()));
2827  Lex(); // Eat identifier token.
2828  return MatchOperand_Success;
2829 }
2830 
2831 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2832 /// instruction.
2834 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2835  SMLoc S = getLoc();
2836  const MCExpr *Expr = nullptr;
2837 
2838  if (getTok().is(AsmToken::Hash)) {
2839  Lex(); // Eat hash token.
2840  }
2841 
2842  if (parseSymbolicImmVal(Expr))
2843  return MatchOperand_ParseFail;
2844 
2845  AArch64MCExpr::VariantKind ELFRefKind;
2846  MCSymbolRefExpr::VariantKind DarwinRefKind;
2847  int64_t Addend;
2848  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2849  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2850  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2851  // No modifier was specified at all; this is the syntax for an ELF basic
2852  // ADRP relocation (unfortunately).
2853  Expr =
2855  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2856  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2857  Addend != 0) {
2858  Error(S, "gotpage label reference not allowed an addend");
2859  return MatchOperand_ParseFail;
2860  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2861  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2862  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2863  ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2864  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2865  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
2866  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2867  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2868  // The operand must be an @page or @gotpage qualified symbolref.
2869  Error(S, "page or gotpage label reference expected");
2870  return MatchOperand_ParseFail;
2871  }
2872  }
2873 
2874  // We have either a label reference possibly with addend or an immediate. The
2875  // addend is a raw value here. The linker will adjust it to only reference the
2876  // page.
2877  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2878  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2879 
2880  return MatchOperand_Success;
2881 }
2882 
2883 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2884 /// instruction.
2886 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2887  SMLoc S = getLoc();
2888  const MCExpr *Expr = nullptr;
2889 
2890  // Leave anything with a bracket to the default for SVE
2891  if (getTok().is(AsmToken::LBrac))
2892  return MatchOperand_NoMatch;
2893 
2894  if (getTok().is(AsmToken::Hash))
2895  Lex(); // Eat hash token.
2896 
2897  if (parseSymbolicImmVal(Expr))
2898  return MatchOperand_ParseFail;
2899 
2900  AArch64MCExpr::VariantKind ELFRefKind;
2901  MCSymbolRefExpr::VariantKind DarwinRefKind;
2902  int64_t Addend;
2903  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2904  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2905  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2906  // No modifier was specified at all; this is the syntax for an ELF basic
2907  // ADR relocation (unfortunately).
2908  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2909  } else {
2910  Error(S, "unexpected adr label");
2911  return MatchOperand_ParseFail;
2912  }
2913  }
2914 
2915  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2916  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2917  return MatchOperand_Success;
2918 }
2919 
2920 /// tryParseFPImm - A floating point immediate expression operand.
2921 template<bool AddFPZeroAsLiteral>
2923 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2924  SMLoc S = getLoc();
2925 
2926  bool Hash = parseOptionalToken(AsmToken::Hash);
2927 
2928  // Handle negation, as that still comes through as a separate token.
2929  bool isNegative = parseOptionalToken(AsmToken::Minus);
2930 
2931  const AsmToken &Tok = getTok();
2932  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2933  if (!Hash)
2934  return MatchOperand_NoMatch;
2935  TokError("invalid floating point immediate");
2936  return MatchOperand_ParseFail;
2937  }
2938 
2939  // Parse hexadecimal representation.
2940  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2941  if (Tok.getIntVal() > 255 || isNegative) {
2942  TokError("encoded floating point value out of range");
2943  return MatchOperand_ParseFail;
2944  }
2945 
2946  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2947  Operands.push_back(
2948  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2949  } else {
2950  // Parse FP representation.
2951  APFloat RealVal(APFloat::IEEEdouble());
2952  auto StatusOrErr =
2953  RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2954  if (errorToBool(StatusOrErr.takeError())) {
2955  TokError("invalid floating point representation");
2956  return MatchOperand_ParseFail;
2957  }
2958 
2959  if (isNegative)
2960  RealVal.changeSign();
2961 
2962  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2963  Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
2964  Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
2965  } else
2966  Operands.push_back(AArch64Operand::CreateFPImm(
2967  RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2968  }
2969 
2970  Lex(); // Eat the token.
2971 
2972  return MatchOperand_Success;
2973 }
2974 
2975 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2976 /// a shift suffix, for example '#1, lsl #12'.
2978 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2979  SMLoc S = getLoc();
2980 
2981  if (getTok().is(AsmToken::Hash))
2982  Lex(); // Eat '#'
2983  else if (getTok().isNot(AsmToken::Integer))
2984  // Operand should start from # or should be integer, emit error otherwise.
2985  return MatchOperand_NoMatch;
2986 
2987  const MCExpr *Imm = nullptr;
2988  if (parseSymbolicImmVal(Imm))
2989  return MatchOperand_ParseFail;
2990  else if (getTok().isNot(AsmToken::Comma)) {
2991  Operands.push_back(
2992  AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
2993  return MatchOperand_Success;
2994  }
2995 
2996  // Eat ','
2997  Lex();
2998 
2999  // The optional operand must be "lsl #N" where N is non-negative.
3000  if (!getTok().is(AsmToken::Identifier) ||
3001  !getTok().getIdentifier().equals_insensitive("lsl")) {
3002  Error(getLoc(), "only 'lsl #+N' valid after immediate");
3003  return MatchOperand_ParseFail;
3004  }
3005 
3006  // Eat 'lsl'
3007  Lex();
3008 
3009  parseOptionalToken(AsmToken::Hash);
3010 
3011  if (getTok().isNot(AsmToken::Integer)) {
3012  Error(getLoc(), "only 'lsl #+N' valid after immediate");
3013  return MatchOperand_ParseFail;
3014  }
3015 
3016  int64_t ShiftAmount = getTok().getIntVal();
3017 
3018  if (ShiftAmount < 0) {
3019  Error(getLoc(), "positive shift amount required");
3020  return MatchOperand_ParseFail;
3021  }
3022  Lex(); // Eat the number
3023 
3024  // Just in case the optional lsl #0 is used for immediates other than zero.
3025  if (ShiftAmount == 0 && Imm != nullptr) {
3026  Operands.push_back(
3027  AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3028  return MatchOperand_Success;
3029  }
3030 
3031  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3032  getLoc(), getContext()));
3033  return MatchOperand_Success;
3034 }
3035 
3036 /// parseCondCodeString - Parse a Condition Code string.
3037 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
3039  .Case("eq", AArch64CC::EQ)
3040  .Case("ne", AArch64CC::NE)
3041  .Case("cs", AArch64CC::HS)
3042  .Case("hs", AArch64CC::HS)
3043  .Case("cc", AArch64CC::LO)
3044  .Case("lo", AArch64CC::LO)
3045  .Case("mi", AArch64CC::MI)
3046  .Case("pl", AArch64CC::PL)
3047  .Case("vs", AArch64CC::VS)
3048  .Case("vc", AArch64CC::VC)
3049  .Case("hi", AArch64CC::HI)
3050  .Case("ls", AArch64CC::LS)
3051  .Case("ge", AArch64CC::GE)
3052  .Case("lt", AArch64CC::LT)
3053  .Case("gt", AArch64CC::GT)
3054  .Case("le", AArch64CC::LE)
3055  .Case("al", AArch64CC::AL)
3056  .Case("nv", AArch64CC::NV)
3058 
3059  if (CC == AArch64CC::Invalid &&
3060  getSTI().getFeatureBits()[AArch64::FeatureSVE])
3062  .Case("none", AArch64CC::EQ)
3063  .Case("any", AArch64CC::NE)
3064  .Case("nlast", AArch64CC::HS)
3065  .Case("last", AArch64CC::LO)
3066  .Case("first", AArch64CC::MI)
3067  .Case("nfrst", AArch64CC::PL)
3068  .Case("pmore", AArch64CC::HI)
3069  .Case("plast", AArch64CC::LS)
3070  .Case("tcont", AArch64CC::GE)
3071  .Case("tstop", AArch64CC::LT)
3073 
3074  return CC;
3075 }
3076 
3077 /// parseCondCode - Parse a Condition Code operand.
3078 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3079  bool invertCondCode) {
3080  SMLoc S = getLoc();
3081  const AsmToken &Tok = getTok();
3082  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3083 
3084  StringRef Cond = Tok.getString();
3085  AArch64CC::CondCode CC = parseCondCodeString(Cond);
3086  if (CC == AArch64CC::Invalid)
3087  return TokError("invalid condition code");
3088  Lex(); // Eat identifier token.
3089 
3090  if (invertCondCode) {
3091  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3092  return TokError("condition codes AL and NV are invalid for this instruction");
3094  }
3095 
3096  Operands.push_back(
3097  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3098  return false;
3099 }
3100 
3102 AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3103  const AsmToken &Tok = getTok();
3104  SMLoc S = getLoc();
3105 
3106  if (Tok.isNot(AsmToken::Identifier)) {
3107  TokError("invalid operand for instruction");
3108  return MatchOperand_ParseFail;
3109  }
3110 
3111  unsigned PStateImm = -1;
3112  const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3113  if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3114  PStateImm = SVCR->Encoding;
3115 
3116  Operands.push_back(
3117  AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3118  Lex(); // Eat identifier token.
3119  return MatchOperand_Success;
3120 }
3121 
3123 AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3124  const AsmToken &Tok = getTok();
3125  SMLoc S = getLoc();
3126 
3127  StringRef Name = Tok.getString();
3128 
3129  if (Name.equals_insensitive("za")) {
3130  Lex(); // eat "za"
3131  Operands.push_back(AArch64Operand::CreateMatrixRegister(
3132  AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
3133  getContext()));
3134  if (getLexer().is(AsmToken::LBrac)) {
3135  // There's no comma after matrix operand, so we can parse the next operand
3136  // immediately.
3137  if (parseOperand(Operands, false, false))
3138  return MatchOperand_NoMatch;
3139  }
3140  return MatchOperand_Success;
3141  }
3142 
3143  // Try to parse matrix register.
3144  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3145  if (!Reg)
3146  return MatchOperand_NoMatch;
3147 
3148  size_t DotPosition = Name.find('.');
3149  assert(DotPosition != StringRef::npos && "Unexpected register");
3150 
3151  StringRef Head = Name.take_front(DotPosition);
3152  StringRef Tail = Name.drop_front(DotPosition);
3153  StringRef RowOrColumn = Head.take_back();
3154 
3155  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
3156  .Case("h", MatrixKind::Row)
3157  .Case("v", MatrixKind::Col)
3158  .Default(MatrixKind::Tile);
3159 
3160  // Next up, parsing the suffix
3161  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3162  if (!KindRes) {
3163  TokError("Expected the register to be followed by element width suffix");
3164  return MatchOperand_ParseFail;
3165  }
3166  unsigned ElementWidth = KindRes->second;
3167 
3168  Lex();
3169 
3170  Operands.push_back(AArch64Operand::CreateMatrixRegister(
3171  Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3172 
3173  if (getLexer().is(AsmToken::LBrac)) {
3174  // There's no comma after matrix operand, so we can parse the next operand
3175  // immediately.
3176  if (parseOperand(Operands, false, false))
3177  return MatchOperand_NoMatch;
3178  }
3179  return MatchOperand_Success;
3180 }
3181 
3182 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3183 /// them if present.
3185 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3186  const AsmToken &Tok = getTok();
3187  std::string LowerID = Tok.getString().lower();
3190  .Case("lsl", AArch64_AM::LSL)
3191  .Case("lsr", AArch64_AM::LSR)
3192  .Case("asr", AArch64_AM::ASR)
3193  .Case("ror", AArch64_AM::ROR)
3194  .Case("msl", AArch64_AM::MSL)
3195  .Case("uxtb", AArch64_AM::UXTB)
3196  .Case("uxth", AArch64_AM::UXTH)
3197  .Case("uxtw", AArch64_AM::UXTW)
3198  .Case("uxtx", AArch64_AM::UXTX)
3199  .Case("sxtb", AArch64_AM::SXTB)
3200  .Case("sxth", AArch64_AM::SXTH)
3201  .Case("sxtw", AArch64_AM::SXTW)
3202  .Case("sxtx", AArch64_AM::SXTX)
3204 
3205  if (ShOp == AArch64_AM::InvalidShiftExtend)
3206  return MatchOperand_NoMatch;
3207 
3208  SMLoc S = Tok.getLoc();
3209  Lex();
3210 
3211  bool Hash = parseOptionalToken(AsmToken::Hash);
3212 
3213  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3214  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3215  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3216  ShOp == AArch64_AM::MSL) {
3217  // We expect a number here.
3218  TokError("expected #imm after shift specifier");
3219  return MatchOperand_ParseFail;
3220  }
3221 
3222  // "extend" type operations don't need an immediate, #0 is implicit.
3223  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3224  Operands.push_back(
3225  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3226  return MatchOperand_Success;
3227  }
3228 
3229  // Make sure we do actually have a number, identifier or a parenthesized
3230  // expression.
3231  SMLoc E = getLoc();
3232  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3233  !getTok().is(AsmToken::Identifier)) {
3234  Error(E, "expected integer shift amount");
3235  return MatchOperand_ParseFail;
3236  }
3237 
3238  const MCExpr *ImmVal;
3239  if (getParser().parseExpression(ImmVal))
3240  return MatchOperand_ParseFail;
3241 
3242  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3243  if (!MCE) {
3244  Error(E, "expected constant '#imm' after shift specifier");
3245  return MatchOperand_ParseFail;
3246  }
3247 
3248  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3249  Operands.push_back(AArch64Operand::CreateShiftExtend(
3250  ShOp, MCE->getValue(), true, S, E, getContext()));
3251  return MatchOperand_Success;
3252 }
3253 
3254 static const struct Extension {
3255  const char *Name;
3257 } ExtensionMap[] = {
3258  {"crc", {AArch64::FeatureCRC}},
3259  {"sm4", {AArch64::FeatureSM4}},
3260  {"sha3", {AArch64::FeatureSHA3}},
3261  {"sha2", {AArch64::FeatureSHA2}},
3262  {"aes", {AArch64::FeatureAES}},
3263  {"crypto", {AArch64::FeatureCrypto}},
3264  {"fp", {AArch64::FeatureFPARMv8}},
3265  {"simd", {AArch64::FeatureNEON}},
3266  {"ras", {AArch64::FeatureRAS}},
3267  {"lse", {AArch64::FeatureLSE}},
3268  {"predres", {AArch64::FeaturePredRes}},
3269  {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3270  {"mte", {AArch64::FeatureMTE}},
3271  {"memtag", {AArch64::FeatureMTE}},
3272  {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3273  {"pan", {AArch64::FeaturePAN}},
3274  {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3275  {"ccpp", {AArch64::FeatureCCPP}},
3276  {"rcpc", {AArch64::FeatureRCPC}},
3277  {"rng", {AArch64::FeatureRandGen}},
3278  {"sve", {AArch64::FeatureSVE}},
3279  {"sve2", {AArch64::FeatureSVE2}},
3280  {"sve2-aes", {AArch64::FeatureSVE2AES}},
3281  {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3282  {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3283  {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3284  {"ls64", {AArch64::FeatureLS64}},
3285  {"xs", {AArch64::FeatureXS}},
3286  {"pauth", {AArch64::FeaturePAuth}},
3287  {"flagm", {AArch64::FeatureFlagM}},
3288  {"rme", {AArch64::FeatureRME}},
3289  {"sme", {AArch64::FeatureSME}},
3290  {"sme-f64", {AArch64::FeatureSMEF64}},
3291  {"sme-i64", {AArch64::FeatureSMEI64}},
3292  // FIXME: Unsupported extensions
3293  {"lor", {}},
3294  {"rdma", {}},
3295  {"profile", {}},
3296 };
3297 
3298 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3299  if (FBS[AArch64::HasV8_1aOps])
3300  Str += "ARMv8.1a";
3301  else if (FBS[AArch64::HasV8_2aOps])
3302  Str += "ARMv8.2a";
3303  else if (FBS[AArch64::HasV8_3aOps])
3304  Str += "ARMv8.3a";
3305  else if (FBS[AArch64::HasV8_4aOps])
3306  Str += "ARMv8.4a";
3307  else if (FBS[AArch64::HasV8_5aOps])
3308  Str += "ARMv8.5a";
3309  else if (FBS[AArch64::HasV8_6aOps])
3310  Str += "ARMv8.6a";
3311  else if (FBS[AArch64::HasV8_7aOps])
3312  Str += "ARMv8.7a";
3313  else if (FBS[AArch64::HasV9_0aOps])
3314  Str += "ARMv9-a";
3315  else if (FBS[AArch64::HasV9_1aOps])
3316  Str += "ARMv9.1a";
3317  else if (FBS[AArch64::HasV9_2aOps])
3318  Str += "ARMv9.2a";
3319  else {
3320  SmallVector<std::string, 2> ExtMatches;
3321  for (const auto& Ext : ExtensionMap) {
3322  // Use & in case multiple features are enabled
3323  if ((FBS & Ext.Features) != FeatureBitset())
3324  ExtMatches.push_back(Ext.Name);
3325  }
3326  Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3327  }
3328 }
3329 
3330 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3331  SMLoc S) {
3332  const uint16_t Op2 = Encoding & 7;
3333  const uint16_t Cm = (Encoding & 0x78) >> 3;
3334  const uint16_t Cn = (Encoding & 0x780) >> 7;
3335  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3336 
3337  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3338 
3339  Operands.push_back(
3340  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3341  Operands.push_back(
3342  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3343  Operands.push_back(
3344  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3345  Expr = MCConstantExpr::create(Op2, getContext());
3346  Operands.push_back(
3347  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3348 }
3349 
3350 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3351 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3352 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3354  if (Name.find('.') != StringRef::npos)
3355  return TokError("invalid operand");
3356 
3357  Mnemonic = Name;
3358  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3359 
3360  const AsmToken &Tok = getTok();
3361  StringRef Op = Tok.getString();
3362  SMLoc S = Tok.getLoc();
3363 
3364  if (Mnemonic == "ic") {
3365  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3366  if (!IC)
3367  return TokError("invalid operand for IC instruction");
3368  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3369  std::string Str("IC " + std::string(IC->Name) + " requires: ");
3371  return TokError(Str);
3372  }
3373  createSysAlias(IC->Encoding, Operands, S);
3374  } else if (Mnemonic == "dc") {
3375  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3376  if (!DC)
3377  return TokError("invalid operand for DC instruction");
3378  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3379  std::string Str("DC " + std::string(DC->Name) + " requires: ");
3380  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3381  return TokError(Str);
3382  }
3383  createSysAlias(DC->Encoding, Operands, S);
3384  } else if (Mnemonic == "at") {
3385  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3386  if (!AT)
3387  return TokError("invalid operand for AT instruction");
3388  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3389  std::string Str("AT " + std::string(AT->Name) + " requires: ");
3391  return TokError(Str);
3392  }
3393  createSysAlias(AT->Encoding, Operands, S);
3394  } else if (Mnemonic == "tlbi") {
3395  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3396  if (!TLBI)
3397  return TokError("invalid operand for TLBI instruction");
3398  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3399  std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3401  return TokError(Str);
3402  }
3403  createSysAlias(TLBI->Encoding, Operands, S);
3404  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
3405  const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
3406  if (!PRCTX)
3407  return TokError("invalid operand for prediction restriction instruction");
3408  else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
3409  std::string Str(
3410  Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
3412  return TokError(Str);
3413  }
3414  uint16_t PRCTX_Op2 =
3415  Mnemonic == "cfp" ? 4 :
3416  Mnemonic == "dvp" ? 5 :
3417  Mnemonic == "cpp" ? 7 :
3418  0;
3419  assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
3420  createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
3421  }
3422 
3423  Lex(); // Eat operand.
3424 
3425  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3426  bool HasRegister = false;
3427 
3428  // Check for the optional register operand.
3429  if (parseOptionalToken(AsmToken::Comma)) {
3430  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3431  return TokError("expected register operand");
3432  HasRegister = true;
3433  }
3434 
3435  if (ExpectRegister && !HasRegister)
3436  return TokError("specified " + Mnemonic + " op requires a register");
3437  else if (!ExpectRegister && HasRegister)
3438  return TokError("specified " + Mnemonic + " op does not use a register");
3439 
3440  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3441  return true;
3442 
3443  return false;
3444 }
3445 
3447 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3448  MCAsmParser &Parser = getParser();
3449  const AsmToken &Tok = getTok();
3450 
3451  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3452  TokError("'csync' operand expected");
3453  return MatchOperand_ParseFail;
3454  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3455  // Immediate operand.
3456  const MCExpr *ImmVal;
3457  SMLoc ExprLoc = getLoc();
3458  AsmToken IntTok = Tok;
3459  if (getParser().parseExpression(ImmVal))
3460  return MatchOperand_ParseFail;
3461  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3462  if (!MCE) {
3463  Error(ExprLoc, "immediate value expected for barrier operand");
3464  return MatchOperand_ParseFail;
3465  }
3466  int64_t Value = MCE->getValue();
3467  if (Mnemonic == "dsb" && Value > 15) {
3468  // This case is a no match here, but it might be matched by the nXS
3469  // variant. Deliberately not unlex the optional '#' as it is not necessary
3470  // to characterize an integer immediate.
3471  Parser.getLexer().UnLex(IntTok);
3472  return MatchOperand_NoMatch;
3473  }
3474  if (Value < 0 || Value > 15) {
3475  Error(ExprLoc, "barrier operand out of range");
3476  return MatchOperand_ParseFail;
3477  }
3478  auto DB = AArch64DB::lookupDBByEncoding(Value);
3479  Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3480  ExprLoc, getContext(),
3481  false /*hasnXSModifier*/));
3482  return MatchOperand_Success;
3483  }
3484 
3485  if (Tok.isNot(AsmToken::Identifier)) {
3486  TokError("invalid operand for instruction");
3487  return MatchOperand_ParseFail;
3488  }
3489 
3490  StringRef Operand = Tok.getString();
3491  auto TSB = AArch64TSB::lookupTSBByName(Operand);
3492  auto DB = AArch64DB::lookupDBByName(Operand);
3493  // The only valid named option for ISB is 'sy'
3494  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3495  TokError("'sy' or #imm operand expected");
3496  return MatchOperand_ParseFail;
3497  // The only valid named option for TSB is 'csync'
3498  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3499  TokError("'csync' operand expected");
3500  return MatchOperand_ParseFail;
3501  } else if (!DB && !TSB) {
3502  if (Mnemonic == "dsb") {
3503  // This case is a no match here, but it might be matched by the nXS
3504  // variant.
3505  return MatchOperand_NoMatch;
3506  }
3507  TokError("invalid barrier option name");
3508  return MatchOperand_ParseFail;
3509  }
3510 
3511  Operands.push_back(AArch64Operand::CreateBarrier(
3512  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3513  getContext(), false /*hasnXSModifier*/));
3514  Lex(); // Consume the option
3515 
3516  return MatchOperand_Success;
3517 }
3518 
3520 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3521  const AsmToken &Tok = getTok();
3522 
3523  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3524  if (Mnemonic != "dsb")
3525  return MatchOperand_ParseFail;
3526 
3527  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3528  // Immediate operand.
3529  const MCExpr *ImmVal;
3530  SMLoc ExprLoc = getLoc();
3531  if (getParser().parseExpression(ImmVal))
3532  return MatchOperand_ParseFail;
3533  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3534  if (!MCE) {
3535  Error(ExprLoc, "immediate value expected for barrier operand");
3536  return MatchOperand_ParseFail;
3537  }
3538  int64_t Value = MCE->getValue();
3539  // v8.7-A DSB in the nXS variant accepts only the following immediate
3540  // values: 16, 20, 24, 28.
3541  if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3542  Error(ExprLoc, "barrier operand out of range");
3543  return MatchOperand_ParseFail;
3544  }
3545  auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3546  Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3547  ExprLoc, getContext(),
3548  true /*hasnXSModifier*/));
3549  return MatchOperand_Success;
3550  }
3551 
3552  if (Tok.isNot(AsmToken::Identifier)) {
3553  TokError("invalid operand for instruction");
3554  return MatchOperand_ParseFail;
3555  }
3556 
3557  StringRef Operand = Tok.getString();
3558  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3559 
3560  if (!DB) {
3561  TokError("invalid barrier option name");
3562  return MatchOperand_ParseFail;
3563  }
3564 
3565  Operands.push_back(
3566  AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3567  getContext(), true /*hasnXSModifier*/));
3568  Lex(); // Consume the option
3569 
3570  return MatchOperand_Success;
3571 }
3572 
3574 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3575  const AsmToken &Tok = getTok();
3576 
3577  if (Tok.isNot(AsmToken::Identifier))
3578  return MatchOperand_NoMatch;
3579 
3580  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3581  return MatchOperand_NoMatch;
3582 
3583  int MRSReg, MSRReg;
3584  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3585  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3586  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3587  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3588  } else
3589  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3590 
3591  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3592  unsigned PStateImm = -1;
3593  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3594  PStateImm = PState->Encoding;
3595 
3596  Operands.push_back(
3597  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3598  PStateImm, getContext()));
3599  Lex(); // Eat identifier
3600 
3601  return MatchOperand_Success;
3602 }
3603 
3604 /// tryParseNeonVectorRegister - Parse a vector register operand.
3605 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3606  if (getTok().isNot(AsmToken::Identifier))
3607  return true;
3608 
3609  SMLoc S = getLoc();
3610  // Check for a vector register specifier first.
3611  StringRef Kind;
3612  unsigned Reg;
3613  OperandMatchResultTy Res =
3614  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3615  if (Res != MatchOperand_Success)
3616  return true;
3617 
3618  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3619  if (!KindRes)
3620  return true;
3621 
3622  unsigned ElementWidth = KindRes->second;
3623  Operands.push_back(
3624  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3625  S, getLoc(), getContext()));
3626 
3627  // If there was an explicit qualifier, that goes on as a literal text
3628  // operand.
3629  if (!Kind.empty())
3630  Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
3631 
3632  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3633 }
3634 
3636 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3637  SMLoc SIdx = getLoc();
3638  if (parseOptionalToken(AsmToken::LBrac)) {
3639  const MCExpr *ImmVal;
3640  if (getParser().parseExpression(ImmVal))
3641  return MatchOperand_NoMatch;
3642  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3643  if (!MCE) {
3644  TokError("immediate value expected for vector index");
3645  return MatchOperand_ParseFail;;
3646  }
3647 
3648  SMLoc E = getLoc();
3649 
3650  if (parseToken(AsmToken::RBrac, "']' expected"))
3651  return MatchOperand_ParseFail;;
3652 
3653  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3654  E, getContext()));
3655  return MatchOperand_Success;
3656  }
3657 
3658  return MatchOperand_NoMatch;
3659 }
3660 
3661 // tryParseVectorRegister - Try to parse a vector register name with
3662 // optional kind specifier. If it is a register specifier, eat the token
3663 // and return it.
3665 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3666  RegKind MatchKind) {
3667  const AsmToken &Tok = getTok();
3668 
3669  if (Tok.isNot(AsmToken::Identifier))
3670  return MatchOperand_NoMatch;
3671 
3672  StringRef Name = Tok.getString();
3673  // If there is a kind specifier, it's separated from the register name by
3674  // a '.'.
3675  size_t Start = 0, Next = Name.find('.');
3676  StringRef Head = Name.slice(Start, Next);
3677  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3678 
3679  if (RegNum) {
3680  if (Next != StringRef::npos) {
3681  Kind = Name.slice(Next, StringRef::npos);
3682  if (!isValidVectorKind(Kind, MatchKind)) {
3683  TokError("invalid vector kind qualifier");
3684  return MatchOperand_ParseFail;
3685  }
3686  }
3687  Lex(); // Eat the register token.
3688 
3689  Reg = RegNum;
3690  return MatchOperand_Success;
3691  }
3692 
3693  return MatchOperand_NoMatch;
3694 }
3695 
3696 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3698 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3699  // Check for a SVE predicate register specifier first.
3700  const SMLoc S = getLoc();
3701  StringRef Kind;
3702  unsigned RegNum;
3703  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3704  if (Res != MatchOperand_Success)
3705  return Res;
3706 
3707  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3708  if (!KindRes)
3709  return MatchOperand_NoMatch;
3710 
3711  unsigned ElementWidth = KindRes->second;
3712  Operands.push_back(AArch64Operand::CreateVectorReg(
3713  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3714  getLoc(), getContext()));
3715 
3716  if (getLexer().is(AsmToken::LBrac)) {
3717  // Indexed predicate, there's no comma so try parse the next operand
3718  // immediately.
3719  if (parseOperand(Operands, false, false))
3720  return MatchOperand_NoMatch;
3721  }
3722 
3723  // Not all predicates are followed by a '/m' or '/z'.
3724  if (getTok().isNot(AsmToken::Slash))
3725  return MatchOperand_Success;
3726 
3727  // But when they do they shouldn't have an element type suffix.
3728  if (!Kind.empty()) {
3729  Error(S, "not expecting size suffix");
3730  return MatchOperand_ParseFail;
3731  }
3732 
3733  // Add a literal slash as operand
3734  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
3735 
3736  Lex(); // Eat the slash.
3737 
3738  // Zeroing or merging?
3739  auto Pred = getTok().getString().lower();
3740  if (Pred != "z" && Pred != "m") {
3741  Error(getLoc(), "expecting 'm' or 'z' predication");
3742  return MatchOperand_ParseFail;
3743  }
3744 
3745  // Add zero/merge token.
3746  const char *ZM = Pred == "z" ? "z" : "m";
3747  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
3748 
3749  Lex(); // Eat zero/merge token.
3750  return MatchOperand_Success;
3751 }
3752 
3753 /// parseRegister - Parse a register operand.
3754 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3755  // Try for a Neon vector register.
3756  if (!tryParseNeonVectorRegister(Operands))
3757  return false;
3758 
3759  // Otherwise try for a scalar register.
3760  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3761  return false;
3762 
3763  return true;
3764 }
3765 
3766 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3767  bool HasELFModifier = false;
3769 
3770  if (parseOptionalToken(AsmToken::Colon)) {
3771  HasELFModifier = true;
3772 
3773  if (getTok().isNot(AsmToken::Identifier))
3774  return TokError("expect relocation specifier in operand after ':'");
3775 
3776  std::string LowerCase = getTok().getIdentifier().lower();
3777  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3778  .Case("lo12", AArch64MCExpr::VK_LO12)
3779  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3780  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3781  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3782  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3783  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3784  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3785  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3786  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3787  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3788  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3789  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3790  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3791  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3792  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3793  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3794  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3795  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3796  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3797  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3798  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3799  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3800  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3801  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3802  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3803  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3804  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3805  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3806  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3807  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3808  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3809  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3810  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3811  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3812  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3813  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3815  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3816  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3818  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3819  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3820  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3822  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3823  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3825 
3826  if (RefKind == AArch64MCExpr::VK_INVALID)
3827  return TokError("expect relocation specifier in operand after ':'");
3828 
3829  Lex(); // Eat identifier
3830 
3831  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3832  return true;
3833  }
3834 
3835  if (getParser().parseExpression(ImmVal))
3836  return true;
3837 
3838  if (HasELFModifier)
3839  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3840 
3841  return false;
3842 }
3843 
/// Parse an SME matrix tile list operand, e.g. "{za0.d, za1.d}", the empty
/// list "{}", or the "{za}" whole-array alias. Produces a MatrixTileList
/// operand whose mask has one bit per ZAD tile covered by the listed tiles.
/// NOTE(review): the return-type line of this definition (original 3844) is
/// not visible in this extract; per the MatchOperand_* returns it is
/// OperandMatchResultTy.
3845 AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
3846  if (getTok().isNot(AsmToken::LCurly))
3847  return MatchOperand_NoMatch;
3848 
  // Helper: parse one "<tile>.<suffix>" token, yielding the tile register and
  // its element width in bits. Leaves the token unconsumed on NoMatch.
3849  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
3850  StringRef Name = getTok().getString();
3851  size_t DotPosition = Name.find('.');
3852  if (DotPosition == StringRef::npos)
3853  return MatchOperand_NoMatch;
3854 
3855  unsigned RegNum = matchMatrixTileListRegName(Name);
3856  if (!RegNum)
3857  return MatchOperand_NoMatch;
3858 
  // Tail keeps the '.' so the suffix parser sees ".d" etc.
  // NOTE(review): the initializer of KindRes (original line 3861) is missing
  // from this extract.
3859  StringRef Tail = Name.drop_front(DotPosition)
3860  const Optional<std::pair<int, int>> &KindRes =
3862  if (!KindRes) {
3863  TokError("Expected the register to be followed by element width suffix");
3864  return MatchOperand_ParseFail;
3865  }
3866  ElementWidth = KindRes->second;
3867  Reg = RegNum;
3868  Lex(); // Eat the register.
3869  return MatchOperand_Success;
3870  };
3871 
3872  SMLoc S = getLoc();
3873  auto LCurly = getTok();
3874  Lex(); // Eat left bracket token.
3875 
3876  // Empty matrix list
3877  if (parseOptionalToken(AsmToken::RCurly)) {
3878  Operands.push_back(AArch64Operand::CreateMatrixTileList(
3879  /*RegMask=*/0, S, getLoc(), getContext()));
3880  return MatchOperand_Success;
3881  }
3882 
3883  // Try parse {za} alias early
3884  if (getTok().getString().equals_insensitive("za")) {
3885  Lex(); // Eat 'za'
3886 
3887  if (parseToken(AsmToken::RCurly, "'}' expected"))
3888  return MatchOperand_ParseFail;
3889 
  // "za" denotes the whole array: all eight ZAD tiles.
3890  Operands.push_back(AArch64Operand::CreateMatrixTileList(
3891  /*RegMask=*/0xFF, S, getLoc(), getContext()));
3892  return MatchOperand_Success;
3893  }
3894 
3895  SMLoc TileLoc = getLoc();
3896 
3897  unsigned FirstReg, ElementWidth;
3898  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
3899  if (ParseRes != MatchOperand_Success) {
  // Put the '{' back so other list-like operand parsers can still match.
3900  getLexer().UnLex(LCurly);
3901  return ParseRes;
3902  }
3903 
3904  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3905 
3906  unsigned PrevReg = FirstReg;
3907  unsigned Count = 1;
3908 
  // DRegs accumulates the ZAD tiles aliased by every tile seen so far.
3909  SmallSet<unsigned, 8> DRegs;
3910  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
3911 
3912  SmallSet<unsigned, 8> SeenRegs;
3913  SeenRegs.insert(FirstReg);
3914 
3915  while (parseOptionalToken(AsmToken::Comma)) {
3916  TileLoc = getLoc();
3917  unsigned Reg, NextElementWidth;
3918  ParseRes = ParseMatrixTile(Reg, NextElementWidth);
3919  if (ParseRes != MatchOperand_Success)
3920  return ParseRes;
3921 
3922  // Element size must match on all regs in the list.
3923  if (ElementWidth != NextElementWidth) {
3924  Error(TileLoc, "mismatched register size suffix");
3925  return MatchOperand_ParseFail;
3926  }
3927 
  // Out-of-order and duplicate tiles are accepted, but warned about.
3928  if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
3929  Warning(TileLoc, "tile list not in ascending order");
3930 
3931  if (SeenRegs.contains(Reg))
3932  Warning(TileLoc, "duplicate tile in list");
3933  else {
3934  SeenRegs.insert(Reg);
3935  AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
3936  }
3937 
3938  PrevReg = Reg;
3939  ++Count;
3940  }
3941 
3942  if (parseToken(AsmToken::RCurly, "'}' expected"))
3943  return MatchOperand_ParseFail;
3944 
  // Encode the collected ZAD tiles as a bitmask relative to ZAD0.
3945  unsigned RegMask = 0;
3946  for (auto Reg : DRegs)
3947  RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
3948  RI->getEncodingValue(AArch64::ZAD0));
3949  Operands.push_back(
3950  AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
3951 
3952  return MatchOperand_Success;
3953 }
3954 
/// Parse a vector register list such as "{v0.8b, v1.8b}" or the ranged form
/// "{v0.8b-v2.8b}", for the register kind given by the template parameter
/// (Neon or SVE). At most 4 registers, sequential modulo 32, with matching
/// element-kind suffixes.
/// NOTE(review): the return-type line of this definition (original 3956) is
/// not visible in this extract; per the MatchOperand_* returns it is
/// OperandMatchResultTy.
3955 template <RegKind VectorKind>
3957 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3958  bool ExpectMatch) {
3959  MCAsmParser &Parser = getParser();
3960  if (!getTok().is(AsmToken::LCurly))
3961  return MatchOperand_NoMatch;
3962 
3963  // Wrapper around parse function
  // Parses one vector register plus its kind suffix. NoMatch is only an error
  // when NoMatchIsError is set and the token cannot be an SME "za..." form.
3964  auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3965  bool NoMatchIsError) {
3966  auto RegTok = getTok();
3967  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3968  if (ParseRes == MatchOperand_Success) {
3969  if (parseVectorKind(Kind, VectorKind))
3970  return ParseRes;
3971  llvm_unreachable("Expected a valid vector kind");
3972  }
3973 
3974  if (RegTok.isNot(AsmToken::Identifier) ||
3975  ParseRes == MatchOperand_ParseFail ||
3976  (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
3977  !RegTok.getString().startswith_insensitive("za"))) {
3978  Error(Loc, "vector register expected");
3979  return MatchOperand_ParseFail;
3980  }
3981 
3982  return MatchOperand_NoMatch;
3983  };
3984 
3985  SMLoc S = getLoc();
3986  auto LCurly = getTok();
3987  Lex(); // Eat left bracket token.
3988 
3989  StringRef Kind;
3990  unsigned FirstReg;
3991  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3992 
3993  // Put back the original left bracket if there was no match, so that
3994  // different types of list-operands can be matched (e.g. SVE, Neon).
3995  if (ParseRes == MatchOperand_NoMatch)
3996  Parser.getLexer().UnLex(LCurly);
3997 
3998  if (ParseRes != MatchOperand_Success)
3999  return ParseRes;
4000 
4001  int64_t PrevReg = FirstReg;
4002  unsigned Count = 1;
4003 
  // Ranged form: "{vN.T-vM.T}".
4004  if (parseOptionalToken(AsmToken::Minus)) {
4005  SMLoc Loc = getLoc();
4006  StringRef NextKind;
4007 
4008  unsigned Reg;
4009  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4010  if (ParseRes != MatchOperand_Success)
4011  return ParseRes;
4012 
4013  // Any Kind suffices must match on all regs in the list.
4014  if (Kind != NextKind) {
4015  Error(Loc, "mismatched register size suffix");
4016  return MatchOperand_ParseFail;
4017  }
4018 
  // Distance between first and last register, wrapping at 32.
4019  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
4020 
4021  if (Space == 0 || Space > 3) {
4022  Error(Loc, "invalid number of vectors");
4023  return MatchOperand_ParseFail;
4024  }
4025 
4026  Count += Space;
4027  }
  // Comma-separated form: "{vN.T, vN+1.T, ...}".
4028  else {
4029  while (parseOptionalToken(AsmToken::Comma)) {
4030  SMLoc Loc = getLoc();
4031  StringRef NextKind;
4032  unsigned Reg;
4033  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4034  if (ParseRes != MatchOperand_Success)
4035  return ParseRes;
4036 
4037  // Any Kind suffices must match on all regs in the list.
4038  if (Kind != NextKind) {
4039  Error(Loc, "mismatched register size suffix");
4040  return MatchOperand_ParseFail;
4041  }
4042 
4043  // Registers must be incremental (with wraparound at 31)
4044  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
4045  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
4046  Error(Loc, "registers must be sequential");
4047  return MatchOperand_ParseFail;
4048  }
4049 
4050  PrevReg = Reg;
4051  ++Count;
4052  }
4053  }
4054 
4055  if (parseToken(AsmToken::RCurly, "'}' expected"))
4056  return MatchOperand_ParseFail;
4057 
4058  if (Count > 4) {
4059  Error(S, "invalid number of vectors");
4060  return MatchOperand_ParseFail;
4061  }
4062 
  // An empty Kind (e.g. bare "{z0, z1}") leaves both at 0.
4063  unsigned NumElements = 0;
4064  unsigned ElementWidth = 0;
4065  if (!Kind.empty()) {
4066  if (const auto &VK = parseVectorKind(Kind, VectorKind))
4067  std::tie(NumElements, ElementWidth) = *VK;
4068  }
4069 
4070  Operands.push_back(AArch64Operand::CreateVectorList(
4071  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
4072  getContext()));
4073 
4074  return MatchOperand_Success;
4075 }
4076 
4077 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4078 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4079  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4080  if (ParseRes != MatchOperand_Success)
4081  return true;
4082 
4083  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4084 }
4085 
/// Parse a GPR64sp operand optionally followed by ", #0" (e.g. "sp, #0" as
/// used by some system instructions). Any index other than an absent one or
/// a literal zero is rejected.
/// NOTE(review): the return-type line of this definition (original 4086) is
/// not visible in this extract; per the MatchOperand_* returns it is
/// OperandMatchResultTy.
4087 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4088  SMLoc StartLoc = getLoc();
4089 
4090  unsigned RegNum;
4091  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4092  if (Res != MatchOperand_Success)
4093  return Res;
4094 
  // No comma => bare register, no index to check.
4095  if (!parseOptionalToken(AsmToken::Comma)) {
4096  Operands.push_back(AArch64Operand::CreateReg(
4097  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4098  return MatchOperand_Success;
4099  }
4100 
  // The '#' before the immediate is optional.
4101  parseOptionalToken(AsmToken::Hash);
4102 
4103  if (getTok().isNot(AsmToken::Integer)) {
4104  Error(getLoc(), "index must be absent or #0");
4105  return MatchOperand_ParseFail;
4106  }
4107 
  // Only a constant-zero index is legal here.
4108  const MCExpr *ImmVal;
4109  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4110  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4111  Error(getLoc(), "index must be absent or #0");
4112  return MatchOperand_ParseFail;
4113  }
4114 
4115  Operands.push_back(AArch64Operand::CreateReg(
4116  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4117  return MatchOperand_Success;
4118 }
4119 
/// Parse a scalar GPR operand, optionally followed by a shift/extend
/// (", lsl #2" etc.) when ParseShiftExtend is set. EqTy records whether a
/// tied operand must equal this register exactly or its sub/super register.
/// NOTE(review): the return-type line of this definition (original 4121) is
/// not visible in this extract; per the MatchOperand_* returns it is
/// OperandMatchResultTy.
4120 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4122 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4123  SMLoc StartLoc = getLoc();
4124 
4125  unsigned RegNum;
4126  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4127  if (Res != MatchOperand_Success)
4128  return Res;
4129 
4130  // No shift/extend is the default.
4131  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4132  Operands.push_back(AArch64Operand::CreateReg(
4133  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4134  return MatchOperand_Success;
4135  }
4136 
4137  // Eat the comma
4138  Lex();
4139 
4140  // Match the shift
  // NOTE(review): the declaration of ExtOpnd (original line 4141, a temporary
  // operand vector receiving the parsed shift/extend) is missing from this
  // extract.
4142  Res = tryParseOptionalShiftExtend(ExtOpnd);
4143  if (Res != MatchOperand_Success)
4144  return Res;
4145 
  // Fold the parsed shift/extend into a single register operand.
4146  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4147  Operands.push_back(AArch64Operand::CreateReg(
4148  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4149  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4150  Ext->hasShiftExtendAmount()));
4151 
4152  return MatchOperand_Success;
4153 }
4154 
4155 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4156  MCAsmParser &Parser = getParser();
4157 
4158  // Some SVE instructions have a decoration after the immediate, i.e.
4159  // "mul vl". We parse them here and add tokens, which must be present in the
4160  // asm string in the tablegen instruction.
4161  bool NextIsVL =
4162  Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4163  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4164  if (!getTok().getString().equals_insensitive("mul") ||
4165  !(NextIsVL || NextIsHash))
4166  return true;
4167 
4168  Operands.push_back(
4169  AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4170  Lex(); // Eat the "mul"
4171 
4172  if (NextIsVL) {
4173  Operands.push_back(
4174  AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4175  Lex(); // Eat the "vl"
4176  return false;
4177  }
4178 
4179  if (NextIsHash) {
4180  Lex(); // Eat the #
4181  SMLoc S = getLoc();
4182 
4183  // Parse immediate operand.
4184  const MCExpr *ImmVal;
4185  if (!Parser.parseExpression(ImmVal))
4186  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4187  Operands.push_back(AArch64Operand::CreateImm(
4188  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4189  getContext()));
4190  return MatchOperand_Success;
4191  }
4192  }
4193 
4194  return Error(getLoc(), "expected 'vl' or '#<imm>'");
4195 }
4196 
4197 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4198  auto Tok = getTok();
4199  if (Tok.isNot(AsmToken::Identifier))
4200  return true;
4201 
4202  auto Keyword = Tok.getString();
4203  Keyword = StringSwitch<StringRef>(Keyword.lower())
4204  .Case("sm", "sm")
4205  .Case("za", "za")
4206  .Default(Keyword);
4207  Operands.push_back(
4208  AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4209 
4210  Lex();
4211  return false;
4212 }
4213 
4214 /// parseOperand - Parse a arm instruction operand. For now this parses the
4215 /// operand regardless of the mnemonic.
/// \param isCondCode      expect a condition-code operand at this position.
/// \param invertCondCode  invert the parsed condition code (for cset/cinc
///                        style aliases).
/// \returns true on error, false on success (operand(s) pushed).
4216 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4217  bool invertCondCode) {
4218  MCAsmParser &Parser = getParser();
4219 
4220  OperandMatchResultTy ResTy =
4221  MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
4222 
4223  // Check if the current operand has a custom associated parser, if so, try to
4224  // custom parse the operand, or fallback to the general approach.
4225  if (ResTy == MatchOperand_Success)
4226  return false;
4227  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4228  // there was a match, but an error occurred, in which case, just return that
4229  // the operand parsing failed.
4230  if (ResTy == MatchOperand_ParseFail)
4231  return true;
4232 
4233  // Nothing custom, so do general case parsing.
4234  SMLoc S, E;
4235  switch (getLexer().getKind()) {
  // Anything unrecognised is treated as a (possibly relocated) expression.
4236  default: {
4237  SMLoc S = getLoc();
4238  const MCExpr *Expr;
4239  if (parseSymbolicImmVal(Expr))
4240  return Error(S, "invalid operand");
4241 
4242  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4243  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4244  return false;
4245  }
4246  case AsmToken::LBrac: {
4247  Operands.push_back(
4248  AArch64Operand::CreateToken("[", getLoc(), getContext()));
4249  Lex(); // Eat '['
4250 
4251  // There's no comma after a '[', so we can parse the next operand
4252  // immediately.
4253  return parseOperand(Operands, false, false);
4254  }
4255  case AsmToken::LCurly: {
  // Try a Neon vector list first; otherwise emit '{' and recurse.
4256  if (!parseNeonVectorList(Operands))
4257  return false;
4258 
4259  Operands.push_back(
4260  AArch64Operand::CreateToken("{", getLoc(), getContext()));
4261  Lex(); // Eat '{'
4262 
4263  // There's no comma after a '{', so we can parse the next operand
4264  // immediately.
4265  return parseOperand(Operands, false, false);
4266  }
4267  case AsmToken::Identifier: {
4268  // If we're expecting a Condition Code operand, then just parse that.
4269  if (isCondCode)
4270  return parseCondCode(Operands, invertCondCode);
4271 
4272  // If it's a register name, parse it.
4273  if (!parseRegister(Operands))
4274  return false;
4275 
4276  // See if this is a "mul vl" decoration or "mul #<int>" operand used
4277  // by SVE instructions.
4278  if (!parseOptionalMulOperand(Operands))
4279  return false;
4280 
4281  // If this is an "smstart" or "smstop" instruction, parse its special
4282  // keyword operand as an identifier.
4283  if (Mnemonic == "smstart" || Mnemonic == "smstop")
4284  return parseKeywordOperand(Operands);
4285 
4286  // This could be an optional "shift" or "extend" operand.
4287  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
4288  // We can only continue if no tokens were eaten.
  // NOTE(review): returning GotShift from a bool function relies on
  // MatchOperand_Success being 0 (false) and the failure enumerators being
  // non-zero (true).
4289  if (GotShift != MatchOperand_NoMatch)
4290  return GotShift;
4291 
4292  // If this is a two-word mnemonic, parse its special keyword
4293  // operand as an identifier.
4294  if (Mnemonic == "brb")
4295  return parseKeywordOperand(Operands);
4296 
4297  // This was not a register so parse other operands that start with an
4298  // identifier (like labels) as expressions and create them as immediates.
4299  const MCExpr *IdVal;
4300  S = getLoc();
4301  if (getParser().parseExpression(IdVal))
4302  return true;
4303  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4304  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4305  return false;
4306  }
4307  case AsmToken::Integer:
4308  case AsmToken::Real:
4309  case AsmToken::Hash: {
4310  // #42 -> immediate.
4311  S = getLoc();
4312 
4313  parseOptionalToken(AsmToken::Hash);
4314 
4315  // Parse a negative sign
4316  bool isNegative = false;
4317  if (getTok().is(AsmToken::Minus)) {
4318  isNegative = true;
4319  // We need to consume this token only when we have a Real, otherwise
4320  // we let parseSymbolicImmVal take care of it
4321  if (Parser.getLexer().peekTok().is(AsmToken::Real))
4322  Lex();
4323  }
4324 
4325  // The only Real that should come through here is a literal #0.0 for
4326  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4327  // so convert the value.
4328  const AsmToken &Tok = getTok();
4329  if (Tok.is(AsmToken::Real)) {
4330  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4331  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4332  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4333  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4334  Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4335  return TokError("unexpected floating point literal");
4336  else if (IntVal != 0 || isNegative)
4337  return TokError("expected floating-point constant #0.0")
4338  Lex(); // Eat the token.
4339 
  // Emit "#0" and ".0" as two raw tokens, as the matcher expects.
4340  Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4341  Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4342  return false;
4343  }
4344 
4345  const MCExpr *ImmVal;
4346  if (parseSymbolicImmVal(ImmVal))
4347  return true;
4348 
4349  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4350  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4351  return false;
4352  }
4353  case AsmToken::Equal: {
  // GNU-as style "ldr r0, =val" pseudo.
4354  SMLoc Loc = getLoc();
4355  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4356  return TokError("unexpected token in operand");
4357  Lex(); // Eat '='
4358  const MCExpr *SubExprVal;
4359  if (getParser().parseExpression(SubExprVal))
4360  return true;
4361 
4362  if (Operands.size() < 2 ||
4363  !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4364  return Error(Loc, "Only valid when first operand is register");
4365 
4366  bool IsXReg =
4367  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4368  Operands[1]->getReg());
4369 
4370  MCContext& Ctx = getContext();
4371  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4372  // If the op is an imm and can be fit into a mov, then replace ldr with mov.
4373  if (isa<MCConstantExpr>(SubExprVal)) {
4374  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
  // Normalize to a 16-bit payload plus an LSL shift amount of 16/32/48.
4375  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4376  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
4377  ShiftAmt += 16;
4378  Imm >>= 16;
4379  }
4380  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4381  Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4382  Operands.push_back(AArch64Operand::CreateImm(
4383  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4384  if (ShiftAmt)
4385  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4386  ShiftAmt, true, S, E, Ctx));
4387  return false;
4388  }
4389  APInt Simm = APInt(64, Imm << ShiftAmt);
4390  // check if the immediate is an unsigned or signed 32-bit int for W regs
4391  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4392  return Error(Loc, "Immediate too large for register");
4393  }
4394  // If it is a label or an imm that cannot fit in a movz, put it into CP.
4395  const MCExpr *CPLoc =
4396  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4397  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4398  return false;
4399  }
4400  }
4401 }
4402 
4403 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4404  const MCExpr *Expr = nullptr;
4405  SMLoc L = getLoc();
4406  if (check(getParser().parseExpression(Expr), L, "expected expression"))
4407  return true;
4408  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4409  if (check(!Value, L, "expected constant expression"))
4410  return true;
4411  Out = Value->getValue();
4412  return false;
4413 }
4414 
4415 bool AArch64AsmParser::parseComma() {
4416  if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4417  return true;
4418  // Eat the comma
4419  Lex();
4420  return false;
4421 }
4422 
/// Parse a register and check it lies in [First, Last], writing its index
/// relative to \p Base into \p Out. FP (x29) and LR (x30) are special-cased
/// because they are not contiguous with x0..x28 in the register enum.
4423 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4424  unsigned First, unsigned Last) {
4425  unsigned Reg;
4426  SMLoc Start, End;
4427  if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
4428  return true;
4429 
4430  // Special handling for FP and LR; they aren't linearly after x28 in
4431  // the registers enum.
4432  unsigned RangeEnd = Last;
4433  if (Base == AArch64::X0) {
4434  if (Last == AArch64::FP) {
4435  RangeEnd = AArch64::X28;
4436  if (Reg == AArch64::FP) {
4437  Out = 29;
4438  return false;
4439  }
4440  }
4441  if (Last == AArch64::LR) {
4442  RangeEnd = AArch64::X28;
4443  if (Reg == AArch64::FP) {
4444  Out = 29;
4445  return false;
4446  } else if (Reg == AArch64::LR) {
4447  Out = 30;
4448  return false;
4449  }
4450  }
4451  }
4452 
  // NOTE(review): the continuation line of this diagnostic (original 4456,
  // printing the name of the range's upper bound) is missing from this
  // extract.
4453  if (check(Reg < First || Reg > RangeEnd, Start,
4454  Twine("expected register in range ") +
4455  AArch64InstPrinter::getRegisterName(First) + " to " +
4457  return true;
4458  Out = Reg - Base;
4459  return false;
4460 }
4461 
4462 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4463  const MCParsedAsmOperand &Op2) const {
4464  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4465  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4466  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4467  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4468  return MCTargetAsmParser::regsEqual(Op1, Op2);
4469 
4470  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4471  "Testing equality of non-scalar registers not supported");
4472 
4473  // Check if a registers match their sub/super register classes.
4474  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4475  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4476  if (AOp1.getRegEqualityTy() == EqualsSubReg)
4477  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4478  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4479  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4480  if (AOp2.getRegEqualityTy() == EqualsSubReg)
4481  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4482 
4483  return false;
4484 }
4485 
4486 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
4487 /// operands.
/// NOTE(review): the final line of this signature (the OperandVector
/// &Operands parameter and opening brace, original line 4490) is missing
/// from this extract.
4488 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
4489  StringRef Name, SMLoc NameLoc,
  // Expand the "bcc"-style condensed branch aliases into canonical "b.cc".
4491  Name = StringSwitch<StringRef>(Name.lower())
4492  .Case("beq", "b.eq")
4493  .Case("bne", "b.ne")
4494  .Case("bhs", "b.hs")
4495  .Case("bcs", "b.cs")
4496  .Case("blo", "b.lo")
4497  .Case("bcc", "b.cc")
4498  .Case("bmi", "b.mi")
4499  .Case("bpl", "b.pl")
4500  .Case("bvs", "b.vs")
4501  .Case("bvc", "b.vc")
4502  .Case("bhi", "b.hi")
4503  .Case("bls", "b.ls")
4504  .Case("bge", "b.ge")
4505  .Case("blt", "b.lt")
4506  .Case("bgt", "b.gt")
4507  .Case("ble", "b.le")
4508  .Case("bal", "b.al")
4509  .Case("bnv", "b.nv")
4510  .Default(Name);
4511 
4512  // First check for the AArch64-specific .req directive.
4513  if (getTok().is(AsmToken::Identifier) &&
4514  getTok().getIdentifier().lower() == ".req") {
4515  parseDirectiveReq(Name, NameLoc);
4516  // We always return 'error' for this, as we're done with this
4517  // statement and don't need to match the 'instruction."
4518  return true;
4519  }
4520 
4521  // Create the leading tokens for the mnemonic, split by '.' characters.
4522  size_t Start = 0, Next = Name.find('.');
4523  StringRef Head = Name.slice(Start, Next);
4524 
4525  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4526  // the SYS instruction.
4527  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4528  Head == "cfp" || Head == "dvp" || Head == "cpp")
4529  return parseSysAlias(Head, NameLoc, Operands);
4530 
4531  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
4532  Mnemonic = Head;
4533 
4534  // Handle condition codes for a branch mnemonic
4535  if (Head == "b" && Next != StringRef::npos) {
4536  Start = Next;
4537  Next = Name.find('.', Start + 1);
4538  Head = Name.slice(Start + 1, Next);
4539 
4540  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4541  (Head.data() - Name.data()));
4542  AArch64CC::CondCode CC = parseCondCodeString(Head);
4543  if (CC == AArch64CC::Invalid)
4544  return Error(SuffixLoc, "invalid condition code");
  // The '.' is emitted as a suffix token so the matcher sees "b" "." "<cc>".
4545  Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
4546  /*IsSuffix=*/true));
4547  Operands.push_back(
4548  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4549  }
4550 
4551  // Add the remaining tokens in the mnemonic.
4552  while (Next != StringRef::npos) {
4553  Start = Next;
4554  Next = Name.find('.', Start + 1);
4555  Head = Name.slice(Start, Next);
4556  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4557  (Head.data() - Name.data()) + 1);
4558  Operands.push_back(AArch64Operand::CreateToken(
4559  Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
4560  }
4561 
4562  // Conditional compare instructions have a Condition Code operand, which needs
4563  // to be parsed and an immediate operand created.
4564  bool condCodeFourthOperand =
4565  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4566  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4567  Head == "csinc" || Head == "csinv" || Head == "csneg");
4568 
4569  // These instructions are aliases to some of the conditional select
4570  // instructions. However, the condition code is inverted in the aliased
4571  // instruction.
4572  //
4573  // FIXME: Is this the correct way to handle these? Or should the parser
4574  // generate the aliased instructions directly?
4575  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4576  bool condCodeThirdOperand =
4577  (Head == "cinc" || Head == "cinv" || Head == "cneg");
4578 
4579  // Read the remaining operands.
4580  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4581 
4582  unsigned N = 1;
4583  do {
4584  // Parse and remember the operand.
  // N tracks the 1-based operand position so the condition-code position
  // flags above can be applied to the right operand.
4585  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4586  (N == 3 && condCodeThirdOperand) ||
4587  (N == 2 && condCodeSecondOperand),
4588  condCodeSecondOperand || condCodeThirdOperand)) {
4589  return true;
4590  }
4591 
4592  // After successfully parsing some operands there are three special cases
4593  // to consider (i.e. notional operands not separated by commas). Two are
4594  // due to memory specifiers:
4595  // + An RBrac will end an address for load/store/prefetch
4596  // + An '!' will indicate a pre-indexed operation.
4597  //
4598  // And a further case is '}', which ends a group of tokens specifying the
4599  // SME accumulator array 'ZA' or tile vector, i.e.
4600  //
4601  // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
4602  //
4603  // It's someone else's responsibility to make sure these tokens are sane
4604  // in the given context!
4605 
4606  if (parseOptionalToken(AsmToken::RBrac))
4607  Operands.push_back(
4608  AArch64Operand::CreateToken("]", getLoc(), getContext()));
4609  if (parseOptionalToken(AsmToken::Exclaim))
4610  Operands.push_back(
4611  AArch64Operand::CreateToken("!", getLoc(), getContext()));
4612  if (parseOptionalToken(AsmToken::RCurly))
4613  Operands.push_back(
4614  AArch64Operand::CreateToken("}", getLoc(), getContext()));
4615 
4616  ++N;
4617  } while (parseOptionalToken(AsmToken::Comma));
4618  }
4619 
4620  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4621  return true;
4622 
4623  return false;
4624 }
4625 
4626 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4627  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4628  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4629  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4630  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4631  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4632  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4633  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4634 }
4635 
4636 // FIXME: This entire function is a giant hack to provide us with decent
4637 // operand range validation/diagnostics until TableGen/MC can be extended
4638 // to support autogeneration of this kind of validation.
// Validate an assembled MCInst against target-specific rules the generated
// matcher cannot express: movprfx prefix sequencing, writeback-base vs.
// data-register overlap, Rt==Rt2 pair loads, exclusive-store status-register
// overlap, and symbolic-immediate restrictions on ADD/SUB. Loc holds the
// SMLocs of the instruction's operands for precise diagnostics. Returns true
// (after emitting an error) when the instruction is rejected, false otherwise.
4639 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4640  SmallVectorImpl<SMLoc> &Loc) {
4641  const MCRegisterInfo *RI = getContext().getRegisterInfo();
4642  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4643 
4644  // A prefix only applies to the instruction following it. Here we extract
4645  // prefix information for the next instruction before validating the current
4646  // one so that in the case of failure we don't erroneously continue using the
4647  // current prefix.
4648  PrefixInfo Prefix = NextPrefix;
4649  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4650 
4651  // Before validating the instruction in isolation we run through the rules
4652  // applicable when it follows a prefix instruction.
4653  // NOTE: brk & hlt can be prefixed but require no additional validation.
4654  if (Prefix.isActive() &&
4655  (Inst.getOpcode() != AArch64::BRK) &&
4656  (Inst.getOpcode() != AArch64::HLT)) {
4657 
4658  // Prefixed instructions must have a destructive operand.
// NOTE(review): the 'if' condition guarding this return (the check that the
// prefixed instruction is destructive, original lines 4659-4660) is missing
// from this copy of the file -- restore from upstream before compiling.
4661  return Error(IDLoc, "instruction is unpredictable when following a"
4662  " movprfx, suggest replacing movprfx with mov");
4663 
4664  // Destination operands must match.
4665  if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4666  return Error(Loc[0], "instruction is unpredictable when following a"
4667  " movprfx writing to a different destination");
4668 
4669  // Destination operand must not be used in any other location.
// Operands tied to the destination (TIED_TO != -1) are exempt: they are the
// destructive operand itself, which is exactly what movprfx initialises.
4670  for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4671  if (Inst.getOperand(i).isReg() &&
4672  (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4673  isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4674  return Error(Loc[0], "instruction is unpredictable when following a"
4675  " movprfx and destination also used as non-destructive"
4676  " source");
4677  }
4678 
4679  auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4680  if (Prefix.isPredicated()) {
4681  int PgIdx = -1;
4682 
4683  // Find the instruction's general predicate: the first PPR register
4684  // operand after the destination.
4684  for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4685  if (Inst.getOperand(i).isReg() &&
4686  PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4687  PgIdx = i;
4688  break;
4689  }
4690 
4691  // Instruction must be predicated if the movprfx is predicated.
// NOTE(review): the second half of this '||' condition (original line 4693,
// presumably an element-size/TSFlags check) is missing from this copy --
// restore from upstream before compiling.
4692  if (PgIdx == -1 ||
4694  return Error(IDLoc, "instruction is unpredictable when following a"
4695  " predicated movprfx, suggest using unpredicated movprfx");
4696 
4697  // Instruction must use same general predicate as the movprfx.
4698  if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4699  return Error(IDLoc, "instruction is unpredictable when following a"
4700  " predicated movprfx using a different general predicate");
4701 
4702  // Instruction element type must match the movprfx.
4703  if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4704  return Error(IDLoc, "instruction is unpredictable when following a"
4705  " predicated movprfx with a different element size");
4706  }
4707  }
4708 
4709  // Check for indexed addressing modes w/ the base register being the
4710  // same as a destination/source register or pair load where
4711  // the Rt == Rt2. All of those are undefined behaviour.
4712  switch (Inst.getOpcode()) {
4713  case AArch64::LDPSWpre:
4714  case AArch64::LDPWpost:
4715  case AArch64::LDPWpre:
4716  case AArch64::LDPXpost:
4717  case AArch64::LDPXpre: {
// Pre/post-indexed LDP: operand 0 is the writeback result, so the data
// registers start at operand 1 and the base register is operand 3.
4718  unsigned Rt = Inst.getOperand(1).getReg();
4719  unsigned Rt2 = Inst.getOperand(2).getReg();
4720  unsigned Rn = Inst.getOperand(3).getReg();
4721  if (RI->isSubRegisterEq(Rn, Rt))
4722  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4723  "is also a destination");
4724  if (RI->isSubRegisterEq(Rn, Rt2))
4725  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4726  "is also a destination");
// NOTE(review): original line 4727 is missing from this copy; the case has
// no 'break' here, so a deliberate fall-through marker (e.g.
// LLVM_FALLTHROUGH) was presumably present -- restore from upstream.
4728  }
4729  case AArch64::LDPDi:
4730  case AArch64::LDPQi:
4731  case AArch64::LDPSi:
4732  case AArch64::LDPSWi:
4733  case AArch64::LDPWi:
4734  case AArch64::LDPXi: {
// Non-writeback LDP: data registers are operands 0 and 1.
4735  unsigned Rt = Inst.getOperand(0).getReg();
4736  unsigned Rt2 = Inst.getOperand(1).getReg();
4737  if (Rt == Rt2)
4738  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4739  break;
4740  }
4741  case AArch64::LDPDpost:
4742  case AArch64::LDPDpre:
4743  case AArch64::LDPQpost:
4744  case AArch64::LDPQpre:
4745  case AArch64::LDPSpost:
4746  case AArch64::LDPSpre:
4747  case AArch64::LDPSWpost: {
// FP/vector (and LDPSWpost) writeback pair loads: only the Rt==Rt2 overlap
// is checked -- the base is a GPR and cannot alias the FP data registers.
4748  unsigned Rt = Inst.getOperand(1).getReg();
4749  unsigned Rt2 = Inst.getOperand(2).getReg();
4750  if (Rt == Rt2)
4751  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4752  break;
4753  }
4754  case AArch64::STPDpost:
4755  case AArch64::STPDpre:
4756  case AArch64::STPQpost:
4757  case AArch64::STPQpre:
4758  case AArch64::STPSpost:
4759  case AArch64::STPSpre:
4760  case AArch64::STPWpost:
4761  case AArch64::STPWpre:
4762  case AArch64::STPXpost:
4763  case AArch64::STPXpre: {
// Writeback STP: the base register may not overlap either stored register.
4764  unsigned Rt = Inst.getOperand(1).getReg();
4765  unsigned Rt2 = Inst.getOperand(2).getReg();
4766  unsigned Rn = Inst.getOperand(3).getReg();
4767  if (RI->isSubRegisterEq(Rn, Rt))
4768  return Error(Loc[0], "unpredictable STP instruction, writeback base "
4769  "is also a source");
4770  if (RI->isSubRegisterEq(Rn, Rt2))
4771  return Error(Loc[1], "unpredictable STP instruction, writeback base "
4772  "is also a source");
4773  break;
4774  }
4775  case AArch64::LDRBBpre:
4776  case AArch64::LDRBpre:
4777  case AArch64::LDRHHpre:
4778  case AArch64::LDRHpre:
4779  case AArch64::LDRSBWpre:
4780  case AArch64::LDRSBXpre:
4781  case AArch64::LDRSHWpre:
4782  case AArch64::LDRSHXpre:
4783  case AArch64::LDRSWpre:
4784  case AArch64::LDRWpre:
4785  case AArch64::LDRXpre:
4786  case AArch64::LDRBBpost:
4787  case AArch64::LDRBpost:
4788  case AArch64::LDRHHpost:
4789  case AArch64::LDRHpost:
4790  case AArch64::LDRSBWpost:
4791  case AArch64::LDRSBXpost:
4792  case AArch64::LDRSHWpost:
4793  case AArch64::LDRSHXpost:
4794  case AArch64::LDRSWpost:
4795  case AArch64::LDRWpost:
4796  case AArch64::LDRXpost: {
// Writeback single-register loads: destination must not overlap the base.
4797  unsigned Rt = Inst.getOperand(1).getReg();
4798  unsigned Rn = Inst.getOperand(2).getReg();
4799  if (RI->isSubRegisterEq(Rn, Rt))
4800  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4801  "is also a source");
4802  break;
4803  }
4804  case AArch64::STRBBpost:
4805  case AArch64::STRBpost:
4806  case AArch64::STRHHpost:
4807  case AArch64::STRHpost:
4808  case AArch64::STRWpost:
4809  case AArch64::STRXpost:
4810  case AArch64::STRBBpre:
4811  case AArch64::STRBpre:
4812  case AArch64::STRHHpre:
4813  case AArch64::STRHpre:
4814  case AArch64::STRWpre:
4815  case AArch64::STRXpre: {
// Writeback single-register stores: source must not overlap the base.
4816  unsigned Rt = Inst.getOperand(1).getReg();
4817  unsigned Rn = Inst.getOperand(2).getReg();
4818  if (RI->isSubRegisterEq(Rn, Rt))
4819  return Error(Loc[0], "unpredictable STR instruction, writeback base "
4820  "is also a source");
4821  break;
4822  }
4823  case AArch64::STXRB:
4824  case AArch64::STXRH:
4825  case AArch64::STXRW:
4826  case AArch64::STXRX:
4827  case AArch64::STLXRB:
4828  case AArch64::STLXRH:
4829  case AArch64::STLXRW:
4830  case AArch64::STLXRX: {
// Exclusive stores: the status register Rs must not overlap the data
// register or the base (SP as base is explicitly permitted).
4831  unsigned Rs = Inst.getOperand(0).getReg();
4832  unsigned Rt = Inst.getOperand(1).getReg();
4833  unsigned Rn = Inst.getOperand(2).getReg();
4834  if (RI->isSubRegisterEq(Rt, Rs) ||
4835  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4836  return Error(Loc[0],
4837  "unpredictable STXR instruction, status is also a source");
4838  break;
4839  }
4840  case AArch64::STXPW:
4841  case AArch64::STXPX:
4842  case AArch64::STLXPW:
4843  case AArch64::STLXPX: {
// Exclusive pair stores: same status-register rule, over both data regs.
4844  unsigned Rs = Inst.getOperand(0).getReg();
4845  unsigned Rt1 = Inst.getOperand(1).getReg();
4846  unsigned Rt2 = Inst.getOperand(2).getReg();
4847  unsigned Rn = Inst.getOperand(3).getReg();
4848  if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4849  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4850  return Error(Loc[0],
4851  "unpredictable STXP instruction, status is also a source");
4852  break;
4853  }
4854  case AArch64::LDRABwriteback:
4855  case AArch64::LDRAAwriteback: {
// Pointer-authenticated loads with writeback: Xt and Xn must differ.
4856  unsigned Xt = Inst.getOperand(0).getReg();
4857  unsigned Xn = Inst.getOperand(1).getReg();
4858  if (Xt == Xn)
4859  return Error(Loc[0],
4860  "unpredictable LDRA instruction, writeback base"
4861  " is also a destination");
4862  break;
4863  }
4864  }
4865 
4866 
4867  // Now check immediate ranges. Separate from the above as there is overlap
4868  // in the instructions being checked and this keeps the nested conditionals
4869  // to a minimum.
4870  switch (Inst.getOpcode()) {
4871  case AArch64::ADDSWri:
4872  case AArch64::ADDSXri:
4873  case AArch64::ADDWri:
4874  case AArch64::ADDXri:
4875  case AArch64::SUBSWri:
4876  case AArch64::SUBSXri:
4877  case AArch64::SUBWri:
4878  case AArch64::SUBXri: {
4879  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4880  // some slight duplication here.
4881  if (Inst.getOperand(2).isExpr()) {
4882  const MCExpr *Expr = Inst.getOperand(2).getExpr();
4883  AArch64MCExpr::VariantKind ELFRefKind;
4884  MCSymbolRefExpr::VariantKind DarwinRefKind;
4885  int64_t Addend;
4886  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4887 
4888  // Only allow these with ADDXri.
4889  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4890  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4891  Inst.getOpcode() == AArch64::ADDXri)
4892  return false;
4893 
4894  // Only allow these with ADDXri/ADDWri
4895  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4896  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4897  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4898  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4899  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4900  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4901  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4902  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4903  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4904  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4905  (Inst.getOpcode() == AArch64::ADDXri ||
4906  Inst.getOpcode() == AArch64::ADDWri))
4907  return false;
4908 
4909  // Don't allow symbol refs in the immediate field otherwise
4910  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4911  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4912  // 'cmp w0, 'borked')
4913  return Error(Loc.back(), "invalid immediate expression");
4914  }
4915  // We don't validate more complex expressions here
4916  }
4917  return false;
4918  }
4919  default:
4920  return false;
4921  }
4922 }
4923 
// Forward declaration: given an unrecognized mnemonic and the available
// feature set, returns a ", did you mean ..."-style suggestion string (empty
// if nothing is close). Used below when reporting Match_MnemonicFail.
// Presumably defined by the tablegen'd matcher included later in this file --
// confirm against the full source.
4924 static std::string AArch64MnemonicSpellCheck(StringRef S,
4925  const FeatureBitset &FBS,
4926  unsigned VariantID = 0);
4927 
4928 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4931  switch (ErrCode) {
4932  case Match_InvalidTiedOperand: {
4933  RegConstraintEqualityTy EqTy =
4934  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4935  .getRegEqualityTy();
4936  switch (EqTy) {
4937  case RegConstraintEqualityTy::EqualsSubReg:
4938  return Error(Loc, "operand must be 64-bit form of destination register");
4939  case RegConstraintEqualityTy::EqualsSuperReg:
4940  return Error(Loc, "operand must be 32-bit form of destination register");
4941  case RegConstraintEqualityTy::EqualsReg:
4942  return Error(Loc, "operand must match destination register");
4943  }
4944  llvm_unreachable("Unknown RegConstraintEqualityTy");
4945  }
4946  case Match_MissingFeature:
4947  return Error(Loc,
4948  "instruction requires a CPU feature not currently enabled");
4949  case Match_InvalidOperand:
4950  return Error(Loc, "invalid operand for instruction");
4951  case Match_InvalidSuffix:
4952  return Error(Loc, "invalid type suffix for instruction");
4953  case Match_InvalidCondCode:
4954  return Error(Loc, "expected AArch64 condition code");
4955  case Match_AddSubRegExtendSmall:
4956  return Error(Loc,
4957  "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4958  case Match_AddSubRegExtendLarge:
4959  return Error(Loc,
4960  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4961  case Match_AddSubSecondSource:
4962  return Error(Loc,
4963  "expected compatible register, symbol or integer in range [0, 4095]");
4964  case Match_LogicalSecondSource:
4965  return Error(Loc, "expected compatible register or logical immediate");
4966  case Match_InvalidMovImm32Shift:
4967  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4968  case Match_InvalidMovImm64Shift:
4969  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4970  case Match_AddSubRegShift32:
4971  return Error(Loc,
4972  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4973  case Match_AddSubRegShift64:
4974  return Error(Loc,
4975  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4976  case Match_InvalidFPImm:
4977  return Error(Loc,
4978  "expected compatible register or floating-point constant");
4979  case Match_InvalidMemoryIndexedSImm6:
4980  return Error(Loc, "index must be an integer in range [-32, 31].");
4981  case Match_InvalidMemoryIndexedSImm5:
4982  return Error(Loc, "index must be an integer in range [-16, 15].");
4983  case Match_InvalidMemoryIndexed1SImm4:
4984  return Error(Loc, "index must be an integer in range [-8, 7].");
4985  case Match_InvalidMemoryIndexed2SImm4:
4986  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4987  case Match_InvalidMemoryIndexed3SImm4:
4988  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4989  case Match_InvalidMemoryIndexed4SImm4:
4990  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4991  case Match_InvalidMemoryIndexed16SImm4:
4992  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4993  case Match_InvalidMemoryIndexed32SImm4:
4994  return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4995  case Match_InvalidMemoryIndexed1SImm6:
4996  return Error(Loc, "index must be an integer in range [-32, 31].");
4997  case Match_InvalidMemoryIndexedSImm8:
4998  return Error(Loc, "index must be an integer in range [-128, 127].");
4999  case Match_InvalidMemoryIndexedSImm9:
5000  return Error(Loc, "index must be an integer in range [-256, 255].");
5001  case Match_InvalidMemoryIndexed16SImm9:
5002  return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5003  case Match_InvalidMemoryIndexed8SImm10:
5004  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5005  case Match_InvalidMemoryIndexed4SImm7:
5006  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5007  case Match_InvalidMemoryIndexed8SImm7:
5008  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5009  case Match_InvalidMemoryIndexed16SImm7:
5010  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5011  case Match_InvalidMemoryIndexed8UImm5:
5012  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5013  case Match_InvalidMemoryIndexed4UImm5:
5014  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5015  case Match_InvalidMemoryIndexed2UImm5:
5016  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5017  case Match_InvalidMemoryIndexed8UImm6:
5018  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5019  case Match_InvalidMemoryIndexed16UImm6:
5020  return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5021  case Match_InvalidMemoryIndexed4UImm6:
5022  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5023  case Match_InvalidMemoryIndexed2UImm6:
5024  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5025  case Match_InvalidMemoryIndexed1UImm6:
5026  return Error(Loc, "index must be in range [0, 63].");
5027  case Match_InvalidMemoryWExtend8:
5028  return Error(Loc,
5029  "expected 'uxtw' or 'sxtw' with optional shift of #0");
5030  case Match_InvalidMemoryWExtend16:
5031  return Error(Loc,
5032  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5033  case Match_InvalidMemoryWExtend32:
5034  return Error(Loc,
5035  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5036  case Match_InvalidMemoryWExtend64:
5037  return Error(Loc,
5038  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5039  case Match_InvalidMemoryWExtend128:
5040  return Error(Loc,
5041  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5042  case Match_InvalidMemoryXExtend8:
5043  return Error(Loc,
5044  "expected 'lsl' or 'sxtx' with optional shift of #0");
5045  case Match_InvalidMemoryXExtend16:
5046  return Error(Loc,
5047  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5048  case Match_InvalidMemoryXExtend32:
5049  return Error(Loc,
5050  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5051  case Match_InvalidMemoryXExtend64:
5052  return Error(Loc,
5053  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5054  case Match_InvalidMemoryXExtend128:
5055  return Error(Loc,
5056  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5057  case Match_InvalidMemoryIndexed1:
5058  return Error(Loc, "index must be an integer in range [0, 4095].");
5059  case Match_InvalidMemoryIndexed2:
5060  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5061  case Match_InvalidMemoryIndexed4:
5062  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5063  case Match_InvalidMemoryIndexed8:
5064  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5065  case Match_InvalidMemoryIndexed16:
5066  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5067  case Match_InvalidImm0_0:
5068  return Error(Loc, "immediate must be 0.");
5069  case Match_InvalidImm0_1:
5070  return Error(Loc, "immediate must be an integer in range [0, 1].");
5071  case Match_InvalidImm0_3:
5072  return Error(Loc, "immediate must be an integer in range [0, 3].");
5073  case Match_InvalidImm0_7:
5074  return Error(Loc, "immediate must be an integer in range [0, 7].");
5075  case Match_InvalidImm0_15:
5076  return Error(Loc, "immediate must be an integer in range [0, 15].");
5077  case Match_InvalidImm0_31:
5078  return Error(Loc, "immediate must be an integer in range [0, 31].");
5079  case Match_InvalidImm0_63:
5080  return Error(Loc, "immediate must be an integer in range [0, 63].");
5081  case Match_InvalidImm0_127:
5082  return Error(Loc, "immediate must be an integer in range [0, 127].");
5083  case Match_InvalidImm0_255:
5084  return Error(Loc, "immediate must be an integer in range [0, 255].");
5085  case Match_InvalidImm0_65535:
5086  return Error(Loc, "immediate must be an integer in range [0, 65535].");
5087  case Match_InvalidImm1_8:
5088  return Error(Loc, "immediate must be an integer in range [1, 8].");
5089  case Match_InvalidImm1_16:
5090  return Error(Loc, "immediate must be an integer in range [1, 16].");
5091  case Match_InvalidImm1_32:
5092  return Error(Loc, "immediate must be an integer in range [1, 32].");
5093  case Match_InvalidImm1_64:
5094  return Error(Loc, "immediate must be an integer in range [1, 64].");
5095  case Match_InvalidSVEAddSubImm8:
5096  return Error(Loc, "immediate must be an integer in range [0, 255]"
5097  " with a shift amount of 0");
5098  case Match_InvalidSVEAddSubImm16:
5099  case Match_InvalidSVEAddSubImm32:
5100  case Match_InvalidSVEAddSubImm64:
5101  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5102  "multiple of 256 in range [256, 65280]");
5103  case Match_InvalidSVECpyImm8:
5104  return Error(Loc, "immediate must be an integer in range [-128, 255]"
5105  " with a shift amount of 0");
5106  case Match_InvalidSVECpyImm16:
5107  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5108  "multiple of 256 in range [-32768, 65280]");
5109  case Match_InvalidSVECpyImm32:
5110  case Match_InvalidSVECpyImm64:
5111  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5112  "multiple of 256 in range [-32768, 32512]");
5113  case Match_InvalidIndexRange0_0:
5114  return Error(Loc, "expected lane specifier '[0]'");
5115  case Match_InvalidIndexRange1_1:
5116  return Error(Loc, "expected lane specifier '[1]'");
5117  case Match_InvalidIndexRange0_15:
5118  return Error(Loc, "vector lane must be an integer in range [0, 15].");
5119  case Match_InvalidIndexRange0_7:
5120  return Error(Loc, "vector lane must be an integer in range [0, 7].");
5121  case Match_InvalidIndexRange0_3:
5122  return Error(Loc, "vector lane must be an integer in range [0, 3].");
5123  case Match_InvalidIndexRange0_1:
5124  return Error(Loc, "vector lane must be an integer in range [0, 1].");
5125  case Match_InvalidSVEIndexRange0_63:
5126  return Error(Loc, "vector lane must be an integer in range [0, 63].");
5127  case Match_InvalidSVEIndexRange0_31:
5128  return Error(Loc, "vector lane must be an integer in range [0, 31].");
5129  case Match_InvalidSVEIndexRange0_15:
5130  return Error(Loc, "vector lane must be an integer in range [0, 15].");
5131  case Match_InvalidSVEIndexRange0_7:
5132  return Error(Loc, "vector lane must be an integer in range [0, 7].");
5133  case Match_InvalidSVEIndexRange0_3:
5134  return Error(Loc, "vector lane must be an integer in range [0, 3].");
5135  case Match_InvalidLabel:
5136  return Error(Loc, "expected label or encodable integer pc offset");
5137  case Match_MRS:
5138  return Error(Loc, "expected readable system register");
5139  case Match_MSR:
5140  case Match_InvalidSVCR:
5141  return Error(Loc, "expected writable system register or pstate");
5142  case Match_InvalidComplexRotationEven:
5143  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5144  case Match_InvalidComplexRotationOdd:
5145  return Error(Loc, "complex rotation must be 90 or 270.");
5146  case Match_MnemonicFail: {
5147  std::string Suggestion = AArch64MnemonicSpellCheck(
5148  ((AArch64Operand &)*Operands[0]).getToken(),
5149  ComputeAvailableFeatures(STI->getFeatureBits()));
5150  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5151  }
5152  case Match_InvalidGPR64shifted8:
5153  return Error(Loc, "register must be x0..x30 or xzr, without shift");
5154  case Match_InvalidGPR64shifted16:
5155  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5156  case Match_InvalidGPR64shifted32:
5157  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5158  case Match_InvalidGPR64shifted64:
5159  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5160  case Match_InvalidGPR64shifted128:
5161  return Error(
5162  Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5163  case Match_InvalidGPR64NoXZRshifted8:
5164  return Error(Loc, "register must be x0..x30 without shift");
5165  case Match_InvalidGPR64NoXZRshifted16:
5166  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5167  case Match_InvalidGPR64NoXZRshifted32:
5168  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5169  case Match_InvalidGPR64NoXZRshifted64:
5170  return Error<