//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "AArch64InstrInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector
};

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;
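
  // Editor's note (illustrative example, not from the original source):
  // PrefixInfo captures a just-parsed MOVPRFX so validateInstruction() can
  // check the instruction that consumes it. For the predicated pair
  //   movprfx z0.d, p0/m, z1.d
  //   add     z0.d, p0/m, z0.d, z2.d
  // the prefix records Dst = z0, Pg = p0 and a 64-bit element size, all of
  // which must be consistent with the ADD that follows.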

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template<bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = Options.getABIName() == "ilp32";
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
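
  // Editor's note (illustrative example, not from the original source): in
  //   ldr x0, [x1, x2, lsl #3]
  // the "x2, lsl #3" index is parsed as a single register operand whose
  // embedded ShiftExtend is {LSL, 3, HasExplicitAmount = true}, so the
  // matcher can reject a shift amount that does not match the 8-byte element
  // size implied by LDR Xt.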

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct ExtendOp {
    unsigned Val;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }

  template <int Bits, int Scale>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    int64_t Val = MCE->getValue();
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
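
  // Editor's note (illustrative example, not from the original source): for
  // an 8-byte-scaled access (Scale = 8), "ldr x0, [x1, #32760]" is accepted
  // since 32760 = 4095 * 8, while "#32764" fails the modulo check and
  // "#32768" fails the range check (4096 does not fit in 12 bits).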

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    int64_t SVal = typename std::make_signed<T>::type(Val);
    int64_t UVal = typename std::make_unsigned<T>::type(Val);
    if (Val != SVal && Val != UVal)
      return false;

    return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
  }
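
  // Editor's note (illustrative example, not from the original source):
  // logical immediates are the bitmask patterns accepted by AND/ORR/EOR: a
  // rotated run of ones replicated across the register. With T = uint32_t,
  // 0x00ff00ff passes (an 8-bit run replicated every 16 bits), whereas
  // 0x00ff00fe is not encodable and must be materialized differently.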

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
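
  // Editor's note (illustrative example, not from the original source): with
  // Width = 12, a parsed "#0x456, lsl #12" yields the pair (0x456, 12)
  // directly, a plain "#0x456000" is decomposed into (0x456, 12), and a plain
  // "#0x456" becomes (0x456, 0), so all spellings of an ADD/SUB immediate are
  // matched uniformly.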

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }
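
  // Editor's note (illustrative example, not from the original source): this
  // predicate lets "add w0, w1, #-4" be matched as the SUB form, because -4
  // is a negative immediate whose magnitude (4) fits the 12-bit field.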

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
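
  // Editor's note (illustrative example, not from the original source): the
  // target is validated as a byte offset with the low two bits clear. For
  // B (N = 26) the range is +/-128 MiB, for TBZ (N = 14) it is +/-32 KiB,
  // and an unaligned target such as "#6" is rejected outright.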

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])
        return true;
    }

    return false;
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
  }

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
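
  // Editor's note (illustrative example, not from the original source):
  // these predicates back the "mov" alias: "mov x0, #0x20000" matches the
  // MOVZ form (chunk 0x2, lsl #16), while "mov x0, #-3" matches the MOVN
  // form, since ~(-3) = 2 fits in a single 16-bit chunk.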

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
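
  // Editor's note (illustrative example, not from the original source):
  // FCMLA accepts rotations 0/90/180/270 (Angle = 90, Remainder = 0) and
  // FCADD accepts 90/270 (Angle = 180, Remainder = 90); a value like "#45"
  // is reported as a near-match so the user gets a rotation-specific
  // diagnostic instead of a generic parse error.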

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
          APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NoMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
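
  // Editor's note (illustrative example, not from the original source): with
  // Width = 64, "ldr x0, [x1, #-8]" matches here and is emitted as LDUR,
  // while "ldr x0, [x1, #8]" does not, because #8 is already a legal scaled
  // UImm12 offset for the LDR form itself.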

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
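
  // Editor's note (illustrative example, not from the original source): the
  // two helpers above switch between the W and X views of the same register
  // by encoding value, e.g. a parsed w8 is emitted as x8 when the operand
  // class is GPR64as32; the encoding (8) is preserved and only the register
  // class changes.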

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           "NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
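
  // Editor's note (illustrative example, not from the original source): the
  // list is encoded through its first register, offset into the matching
  // tuple class. Parsing "{ v2.4s, v3.4s }" (Q-regs, NumRegs = 2) starts
  // from Q0_Q1 and adds the distance from Q0 to Q2, producing Q2_Q3.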

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
1676 
1677  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1678  assert(N == 1 && "Invalid number of operands!");
1679  Inst.addOperand(MCOperand::createImm(getBarrier()));
1680  }
1681 
1682  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1683  assert(N == 1 && "Invalid number of operands!");
1684 
1685  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1686  }
1687 
1688  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1689  assert(N == 1 && "Invalid number of operands!");
1690 
1691  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1692  }
1693 
1694  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1695  assert(N == 1 && "Invalid number of operands!");
1696 
1697  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1698  }
1699 
1700  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1701  assert(N == 1 && "Invalid number of operands!");
1702 
1703  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1704  }
1705 
1706  void addSysCROperands(MCInst &Inst, unsigned N) const {
1707  assert(N == 1 && "Invalid number of operands!");
1708  Inst.addOperand(MCOperand::createImm(getSysCR()));
1709  }
1710 
1711  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1712  assert(N == 1 && "Invalid number of operands!");
1713  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1714  }
1715 
1716  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1717  assert(N == 1 && "Invalid number of operands!");
1718  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1719  }
1720 
1721  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1722  assert(N == 1 && "Invalid number of operands!");
1723  Inst.addOperand(MCOperand::createImm(getBTIHint()));
1724  }
1725 
1726  void addShifterOperands(MCInst &Inst, unsigned N) const {
1727  assert(N == 1 && "Invalid number of operands!");
1728  unsigned Imm =
1729  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1730  Inst.addOperand(MCOperand::createImm(Imm));
1731  }
1732 
1733  void addExtendOperands(MCInst &Inst, unsigned N) const {
1734  assert(N == 1 && "Invalid number of operands!");
1735  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1736  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1737  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1738  Inst.addOperand(MCOperand::createImm(Imm));
1739  }
1740 
1741  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1742  assert(N == 1 && "Invalid number of operands!");
1743  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1744  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1745  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1746  Inst.addOperand(MCOperand::createImm(Imm));
1747  }
1748 
1749  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1750  assert(N == 2 && "Invalid number of operands!");
1751  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1752  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1753  Inst.addOperand(MCOperand::createImm(IsSigned));
1754  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1755  }
1756 
1757  // For 8-bit load/store instructions with a register offset, both the
1758  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1759  // they're disambiguated by whether the shift was explicit or implicit rather
1760  // than its size.
1761  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1762  assert(N == 2 && "Invalid number of operands!");
1763  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1764  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1765  Inst.addOperand(MCOperand::createImm(IsSigned));
1766  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1767  }
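  // Editorial illustration (not in the original source): both forms below
  // encode a shift of 0, so only the explicitness recorded by
  // hasShiftExtendAmount() selects between the instruction variants:
  //   ldrb w0, [x1, x2]          // implicit shift -> "NoShift" variant
  //   ldrb w0, [x1, x2, lsl #0]  // explicit shift -> "DoShift" variant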
1768 
1769  template<int Shift>
1770  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1771  assert(N == 1 && "Invalid number of operands!");
1772 
1773  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1774  uint64_t Value = CE->getValue();
1775  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1776  }
1777 
1778  template<int Shift>
1779  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1780  assert(N == 1 && "Invalid number of operands!");
1781 
1782  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1783  uint64_t Value = CE->getValue();
1784  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1785  }
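  // Worked example (editorial): for "mov x0, #0x12340000" the MOVZ alias
  // instantiated with Shift=16 adds (0x12340000 >> 16) & 0xffff = 0x1234,
  // while a mostly-ones value such as #0xffffffffffff1234 goes through the
  // MOVN alias with Shift=0, adding (~Value >> 0) & 0xffff = 0xedcb.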
1786 
1787  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1788  assert(N == 1 && "Invalid number of operands!");
1789  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1790  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1791  }
1792 
1793  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1794  assert(N == 1 && "Invalid number of operands!");
1795  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1796  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1797  }
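  // Editorial note: FCMLA-style rotations {0, 90, 180, 270} map to the
  // immediates {0, 1, 2, 3} via value/90, and FCADD-style rotations
  // {90, 270} map to {0, 1} via (value - 90)/180; the range and
  // divisibility checks are assumed to be done by the operand predicates
  // before these conversions run.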
1798 
1799  void print(raw_ostream &OS) const override;
1800 
1801  static std::unique_ptr<AArch64Operand>
1802  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1803  auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1804  Op->Tok.Data = Str.data();
1805  Op->Tok.Length = Str.size();
1806  Op->Tok.IsSuffix = IsSuffix;
1807  Op->StartLoc = S;
1808  Op->EndLoc = S;
1809  return Op;
1810  }
1811 
1812  static std::unique_ptr<AArch64Operand>
1813  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1814  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1815  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1816  unsigned ShiftAmount = 0,
1817  unsigned HasExplicitAmount = false) {
1818  auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1819  Op->Reg.RegNum = RegNum;
1820  Op->Reg.Kind = Kind;
1821  Op->Reg.ElementWidth = 0;
1822  Op->Reg.EqualityTy = EqTy;
1823  Op->Reg.ShiftExtend.Type = ExtTy;
1824  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1825  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1826  Op->StartLoc = S;
1827  Op->EndLoc = E;
1828  return Op;
1829  }
1830 
1831  static std::unique_ptr<AArch64Operand>
1832  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1833  SMLoc S, SMLoc E, MCContext &Ctx,
1834  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1835  unsigned ShiftAmount = 0,
1836  unsigned HasExplicitAmount = false) {
1837  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1838  Kind == RegKind::SVEPredicateVector) &&
1839  "Invalid vector kind");
1840  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1841  HasExplicitAmount);
1842  Op->Reg.ElementWidth = ElementWidth;
1843  return Op;
1844  }
1845 
1846  static std::unique_ptr<AArch64Operand>
1847  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1848  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1849  MCContext &Ctx) {
1850  auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1851  Op->VectorList.RegNum = RegNum;
1852  Op->VectorList.Count = Count;
1853  Op->VectorList.NumElements = NumElements;
1854  Op->VectorList.ElementWidth = ElementWidth;
1855  Op->VectorList.RegisterKind = RegisterKind;
1856  Op->StartLoc = S;
1857  Op->EndLoc = E;
1858  return Op;
1859  }
1860 
1861  static std::unique_ptr<AArch64Operand>
1862  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1863  auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1864  Op->VectorIndex.Val = Idx;
1865  Op->StartLoc = S;
1866  Op->EndLoc = E;
1867  return Op;
1868  }
1869 
1870  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1871  SMLoc E, MCContext &Ctx) {
1872  auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1873  Op->Imm.Val = Val;
1874  Op->StartLoc = S;
1875  Op->EndLoc = E;
1876  return Op;
1877  }
1878 
1879  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1880  unsigned ShiftAmount,
1881  SMLoc S, SMLoc E,
1882  MCContext &Ctx) {
1883  auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1884  Op->ShiftedImm.Val = Val;
1885  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1886  Op->StartLoc = S;
1887  Op->EndLoc = E;
1888  return Op;
1889  }
1890 
1891  static std::unique_ptr<AArch64Operand>
1892  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1893  auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
1894  Op->CondCode.Code = Code;
1895  Op->StartLoc = S;
1896  Op->EndLoc = E;
1897  return Op;
1898  }
1899 
1900  static std::unique_ptr<AArch64Operand>
1901  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1902  auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
1903  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1904  Op->FPImm.IsExact = IsExact;
1905  Op->StartLoc = S;
1906  Op->EndLoc = S;
1907  return Op;
1908  }
1909 
1910  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1911  StringRef Str,
1912  SMLoc S,
1913  MCContext &Ctx) {
1914  auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
1915  Op->Barrier.Val = Val;
1916  Op->Barrier.Data = Str.data();
1917  Op->Barrier.Length = Str.size();
1918  Op->StartLoc = S;
1919  Op->EndLoc = S;
1920  return Op;
1921  }
1922 
1923  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1924  uint32_t MRSReg,
1925  uint32_t MSRReg,
1926  uint32_t PStateField,
1927  MCContext &Ctx) {
1928  auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
1929  Op->SysReg.Data = Str.data();
1930  Op->SysReg.Length = Str.size();
1931  Op->SysReg.MRSReg = MRSReg;
1932  Op->SysReg.MSRReg = MSRReg;
1933  Op->SysReg.PStateField = PStateField;
1934  Op->StartLoc = S;
1935  Op->EndLoc = S;
1936  return Op;
1937  }
1938 
1939  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1940  SMLoc E, MCContext &Ctx) {
1941  auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
1942  Op->SysCRImm.Val = Val;
1943  Op->StartLoc = S;
1944  Op->EndLoc = E;
1945  return Op;
1946  }
1947 
1948  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1949  StringRef Str,
1950  SMLoc S,
1951  MCContext &Ctx) {
1952  auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
1953  Op->Prefetch.Val = Val;
1954  Op->Barrier.Data = Str.data();
1955  Op->Barrier.Length = Str.size();
1956  Op->StartLoc = S;
1957  Op->EndLoc = S;
1958  return Op;
1959  }
1960 
1961  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1962  StringRef Str,
1963  SMLoc S,
1964  MCContext &Ctx) {
1965  auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
1966  Op->PSBHint.Val = Val;
1967  Op->PSBHint.Data = Str.data();
1968  Op->PSBHint.Length = Str.size();
1969  Op->StartLoc = S;
1970  Op->EndLoc = S;
1971  return Op;
1972  }
1973 
1974  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1975  StringRef Str,
1976  SMLoc S,
1977  MCContext &Ctx) {
1978  auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
1979  Op->BTIHint.Val = Val << 1 | 32;
1980  Op->BTIHint.Data = Str.data();
1981  Op->BTIHint.Length = Str.size();
1982  Op->StartLoc = S;
1983  Op->EndLoc = S;
1984  return Op;
1985  }
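  // Editorial note: BTI targets live in the HINT immediate space, so
  // "Val << 1 | 32" maps the target kind onto HINT #32..#38; e.g. "bti c"
  // (Val == 1) is stored as hint #34.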
1986 
1987  static std::unique_ptr<AArch64Operand>
1988  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1989  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1990  auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1991  Op->ShiftExtend.Type = ShOp;
1992  Op->ShiftExtend.Amount = Val;
1993  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1994  Op->StartLoc = S;
1995  Op->EndLoc = E;
1996  return Op;
1997  }
1998 };
1999 
2000 } // end anonymous namespace.
2001 
2002 void AArch64Operand::print(raw_ostream &OS) const {
2003  switch (Kind) {
2004  case k_FPImm:
2005  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2006  if (!getFPImmIsExact())
2007  OS << " (inexact)";
2008  OS << ">";
2009  break;
2010  case k_Barrier: {
2011  StringRef Name = getBarrierName();
2012  if (!Name.empty())
2013  OS << "<barrier " << Name << ">";
2014  else
2015  OS << "<barrier invalid #" << getBarrier() << ">";
2016  break;
2017  }
2018  case k_Immediate:
2019  OS << *getImm();
2020  break;
2021  case k_ShiftedImm: {
2022  unsigned Shift = getShiftedImmShift();
2023  OS << "<shiftedimm ";
2024  OS << *getShiftedImmVal();
2025  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2026  break;
2027  }
2028  case k_CondCode:
2029  OS << "<condcode " << getCondCode() << ">";
2030  break;
2031  case k_VectorList: {
2032  OS << "<vectorlist ";
2033  unsigned Reg = getVectorListStart();
2034  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2035  OS << Reg + i << " ";
2036  OS << ">";
2037  break;
2038  }
2039  case k_VectorIndex:
2040  OS << "<vectorindex " << getVectorIndex() << ">";
2041  break;
2042  case k_SysReg:
2043  OS << "<sysreg: " << getSysReg() << '>';
2044  break;
2045  case k_Token:
2046  OS << "'" << getToken() << "'";
2047  break;
2048  case k_SysCR:
2049  OS << "c" << getSysCR();
2050  break;
2051  case k_Prefetch: {
2052  StringRef Name = getPrefetchName();
2053  if (!Name.empty())
2054  OS << "<prfop " << Name << ">";
2055  else
2056  OS << "<prfop invalid #" << getPrefetch() << ">";
2057  break;
2058  }
2059  case k_PSBHint:
2060  OS << getPSBHintName();
2061  break;
2062  case k_Register:
2063  OS << "<register " << getReg() << ">";
2064  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2065  break;
2066  LLVM_FALLTHROUGH;
2067  case k_BTIHint:
2068  OS << getBTIHintName();
2069  break;
2070  case k_ShiftExtend:
2071  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2072  << getShiftExtendAmount();
2073  if (!hasShiftExtendAmount())
2074  OS << "<imp>";
2075  OS << '>';
2076  break;
2077  }
2078 }
2079 
2080 /// @name Auto-generated Match Functions
2081 /// {
2082 
2083 static unsigned MatchRegisterName(StringRef Name);
2084 
2085 /// }
2086 
2087 static unsigned MatchNeonVectorRegName(StringRef Name) {
2088  return StringSwitch<unsigned>(Name.lower())
2089  .Case("v0", AArch64::Q0)
2090  .Case("v1", AArch64::Q1)
2091  .Case("v2", AArch64::Q2)
2092  .Case("v3", AArch64::Q3)
2093  .Case("v4", AArch64::Q4)
2094  .Case("v5", AArch64::Q5)
2095  .Case("v6", AArch64::Q6)
2096  .Case("v7", AArch64::Q7)
2097  .Case("v8", AArch64::Q8)
2098  .Case("v9", AArch64::Q9)
2099  .Case("v10", AArch64::Q10)
2100  .Case("v11", AArch64::Q11)
2101  .Case("v12", AArch64::Q12)
2102  .Case("v13", AArch64::Q13)
2103  .Case("v14", AArch64::Q14)
2104  .Case("v15", AArch64::Q15)
2105  .Case("v16", AArch64::Q16)
2106  .Case("v17", AArch64::Q17)
2107  .Case("v18", AArch64::Q18)
2108  .Case("v19", AArch64::Q19)
2109  .Case("v20", AArch64::Q20)
2110  .Case("v21", AArch64::Q21)
2111  .Case("v22", AArch64::Q22)
2112  .Case("v23", AArch64::Q23)
2113  .Case("v24", AArch64::Q24)
2114  .Case("v25", AArch64::Q25)
2115  .Case("v26", AArch64::Q26)
2116  .Case("v27", AArch64::Q27)
2117  .Case("v28", AArch64::Q28)
2118  .Case("v29", AArch64::Q29)
2119  .Case("v30", AArch64::Q30)
2120  .Case("v31", AArch64::Q31)
2121  .Default(0);
2122 }
2123 
2124 /// Returns an optional pair of (#elements, element-width) if Suffix
2125 /// is a valid vector kind. Where the number of elements in a vector
2126 /// or the vector width is implicit or explicitly unknown (but still a
2127 /// valid suffix kind), 0 is used.
2128 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2129  RegKind VectorKind) {
2130  std::pair<int, int> Res = {-1, -1};
2131 
2132  switch (VectorKind) {
2133  case RegKind::NeonVector:
2134  Res =
2135  StringSwitch<std::pair<int, int>>(Suffix.lower())
2136  .Case("", {0, 0})
2137  .Case(".1d", {1, 64})
2138  .Case(".1q", {1, 128})
2139  // '.2h' needed for fp16 scalar pairwise reductions
2140  .Case(".2h", {2, 16})
2141  .Case(".2s", {2, 32})
2142  .Case(".2d", {2, 64})
2143  // '.4b' is another special case for the ARMv8.2a dot product
2144  // operand
2145  .Case(".4b", {4, 8})
2146  .Case(".4h", {4, 16})
2147  .Case(".4s", {4, 32})
2148  .Case(".8b", {8, 8})
2149  .Case(".8h", {8, 16})
2150  .Case(".16b", {16, 8})
2151  // Accept the width neutral ones, too, for verbose syntax. If those
2152  // aren't used in the right places, the token operand won't match so
2153  // all will work out.
2154  .Case(".b", {0, 8})
2155  .Case(".h", {0, 16})
2156  .Case(".s", {0, 32})
2157  .Case(".d", {0, 64})
2158  .Default({-1, -1});
2159  break;
2160  case RegKind::SVEPredicateVector:
2161  case RegKind::SVEDataVector:
2162  Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2163  .Case("", {0, 0})
2164  .Case(".b", {0, 8})
2165  .Case(".h", {0, 16})
2166  .Case(".s", {0, 32})
2167  .Case(".d", {0, 64})
2168  .Case(".q", {0, 128})
2169  .Default({-1, -1});
2170  break;
2171  default:
2172  llvm_unreachable("Unsupported RegKind");
2173  }
2174 
2175  if (Res == std::make_pair(-1, -1))
2176  return Optional<std::pair<int, int>>();
2177 
2178  return Optional<std::pair<int, int>>(Res);
2179 }
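// Illustrative behaviour (editorial): parseVectorKind(".4s", NeonVector)
// yields {4, 32}, the width-neutral ".s" yields {0, 32} with the element
// count left implicit, and an unknown suffix such as ".3s" returns an empty
// Optional, which isValidVectorKind() below reports as false.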
2180 
2181 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2182  return parseVectorKind(Suffix, VectorKind).hasValue();
2183 }
2184 
2185 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2186  return StringSwitch<unsigned>(Name.lower())
2187  .Case("z0", AArch64::Z0)
2188  .Case("z1", AArch64::Z1)
2189  .Case("z2", AArch64::Z2)
2190  .Case("z3", AArch64::Z3)
2191  .Case("z4", AArch64::Z4)
2192  .Case("z5", AArch64::Z5)
2193  .Case("z6", AArch64::Z6)
2194  .Case("z7", AArch64::Z7)
2195  .Case("z8", AArch64::Z8)
2196  .Case("z9", AArch64::Z9)
2197  .Case("z10", AArch64::Z10)
2198  .Case("z11", AArch64::Z11)
2199  .Case("z12", AArch64::Z12)
2200  .Case("z13", AArch64::Z13)
2201  .Case("z14", AArch64::Z14)
2202  .Case("z15", AArch64::Z15)
2203  .Case("z16", AArch64::Z16)
2204  .Case("z17", AArch64::Z17)
2205  .Case("z18", AArch64::Z18)
2206  .Case("z19", AArch64::Z19)
2207  .Case("z20", AArch64::Z20)
2208  .Case("z21", AArch64::Z21)
2209  .Case("z22", AArch64::Z22)
2210  .Case("z23", AArch64::Z23)
2211  .Case("z24", AArch64::Z24)
2212  .Case("z25", AArch64::Z25)
2213  .Case("z26", AArch64::Z26)
2214  .Case("z27", AArch64::Z27)
2215  .Case("z28", AArch64::Z28)
2216  .Case("z29", AArch64::Z29)
2217  .Case("z30", AArch64::Z30)
2218  .Case("z31", AArch64::Z31)
2219  .Default(0);
2220 }
2221 
2222 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2223  return StringSwitch<unsigned>(Name.lower())
2224  .Case("p0", AArch64::P0)
2225  .Case("p1", AArch64::P1)
2226  .Case("p2", AArch64::P2)
2227  .Case("p3", AArch64::P3)
2228  .Case("p4", AArch64::P4)
2229  .Case("p5", AArch64::P5)
2230  .Case("p6", AArch64::P6)
2231  .Case("p7", AArch64::P7)
2232  .Case("p8", AArch64::P8)
2233  .Case("p9", AArch64::P9)
2234  .Case("p10", AArch64::P10)
2235  .Case("p11", AArch64::P11)
2236  .Case("p12", AArch64::P12)
2237  .Case("p13", AArch64::P13)
2238  .Case("p14", AArch64::P14)
2239  .Case("p15", AArch64::P15)
2240  .Default(0);
2241 }
2242 
2243 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2244  SMLoc &EndLoc) {
2245  StartLoc = getLoc();
2246  auto Res = tryParseScalarRegister(RegNo);
2247  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2248  return Res != MatchOperand_Success;
2249 }
2250 
2251 // Matches a register name or register alias previously defined by '.req'
2252 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2253  RegKind Kind) {
2254  unsigned RegNum = 0;
2255  if ((RegNum = matchSVEDataVectorRegName(Name)))
2256  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2257 
2258  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2259  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2260 
2261  if ((RegNum = MatchNeonVectorRegName(Name)))
2262  return Kind == RegKind::NeonVector ? RegNum : 0;
2263 
2264  // The parsed register must be of RegKind Scalar
2265  if ((RegNum = MatchRegisterName(Name)))
2266  return Kind == RegKind::Scalar ? RegNum : 0;
2267 
2268  if (!RegNum) {
2269  // Handle a few common aliases of registers.
2270  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2271  .Case("fp", AArch64::FP)
2272  .Case("lr", AArch64::LR)
2273  .Case("x31", AArch64::XZR)
2274  .Case("w31", AArch64::WZR)
2275  .Default(0))
2276  return Kind == RegKind::Scalar ? RegNum : 0;
2277 
2278  // Check for aliases registered via .req. Canonicalize to lower case.
2279  // That's more consistent since register names are case insensitive, and
2280  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2281  auto Entry = RegisterReqs.find(Name.lower());
2282  if (Entry == RegisterReqs.end())
2283  return 0;
2284 
2285  // Set RegNum if the match is the right kind of register.
2286  if (Kind == Entry->getValue().first)
2287  RegNum = Entry->getValue().second;
2288  }
2289  return RegNum;
2290 }
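// Editorial example: matchRegisterNameAlias("fp", RegKind::Scalar) resolves
// to AArch64::FP through the common-alias switch, while an alias defined
// with "foo .req x0" is looked up in RegisterReqs and accepted only if the
// requested RegKind matches the kind recorded when the .req was parsed.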
2291 
2292 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2293 /// Identifier when called, and if it is a register name the token is eaten and
2294 /// the register is added to the operand list.
2295 OperandMatchResultTy
2296 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2297  MCAsmParser &Parser = getParser();
2298  const AsmToken &Tok = Parser.getTok();
2299  if (Tok.isNot(AsmToken::Identifier))
2300  return MatchOperand_NoMatch;
2301 
2302  std::string lowerCase = Tok.getString().lower();
2303  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2304  if (Reg == 0)
2305  return MatchOperand_NoMatch;
2306 
2307  RegNum = Reg;
2308  Parser.Lex(); // Eat identifier token.
2309  return MatchOperand_Success;
2310 }
2311 
2312 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2313 OperandMatchResultTy
2314 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2315  MCAsmParser &Parser = getParser();
2316  SMLoc S = getLoc();
2317 
2318  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2319  Error(S, "Expected cN operand where 0 <= N <= 15");
2320  return MatchOperand_ParseFail;
2321  }
2322 
2323  StringRef Tok = Parser.getTok().getIdentifier();
2324  if (Tok[0] != 'c' && Tok[0] != 'C') {
2325  Error(S, "Expected cN operand where 0 <= N <= 15");
2326  return MatchOperand_ParseFail;
2327  }
2328 
2329  uint32_t CRNum;
2330  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2331  if (BadNum || CRNum > 15) {
2332  Error(S, "Expected cN operand where 0 <= N <= 15");
2333  return MatchOperand_ParseFail;
2334  }
2335 
2336  Parser.Lex(); // Eat identifier token.
2337  Operands.push_back(
2338  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2339  return MatchOperand_Success;
2340 }
2341 
2342 /// tryParsePrefetch - Try to parse a prefetch operand.
2343 template <bool IsSVEPrefetch>
2344 OperandMatchResultTy
2345 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2346  MCAsmParser &Parser = getParser();
2347  SMLoc S = getLoc();
2348  const AsmToken &Tok = Parser.getTok();
2349 
2350  auto LookupByName = [](StringRef N) {
2351  if (IsSVEPrefetch) {
2352  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2353  return Optional<unsigned>(Res->Encoding);
2354  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2355  return Optional<unsigned>(Res->Encoding);
2356  return Optional<unsigned>();
2357  };
2358 
2359  auto LookupByEncoding = [](unsigned E) {
2360  if (IsSVEPrefetch) {
2361  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2362  return Optional<StringRef>(Res->Name);
2363  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2364  return Optional<StringRef>(Res->Name);
2365  return Optional<StringRef>();
2366  };
2367  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2368 
2369  // Either an identifier for named values or a 5-bit immediate.
2370  // Eat optional hash.
2371  if (parseOptionalToken(AsmToken::Hash) ||
2372  Tok.is(AsmToken::Integer)) {
2373  const MCExpr *ImmVal;
2374  if (getParser().parseExpression(ImmVal))
2375  return MatchOperand_ParseFail;
2376 
2377  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2378  if (!MCE) {
2379  TokError("immediate value expected for prefetch operand");
2380  return MatchOperand_ParseFail;
2381  }
2382  unsigned prfop = MCE->getValue();
2383  if (prfop > MaxVal) {
2384  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2385  "] expected");
2386  return MatchOperand_ParseFail;
2387  }
2388 
2389  auto PRFM = LookupByEncoding(MCE->getValue());
2390  Operands.push_back(AArch64Operand::CreatePrefetch(
2391  prfop, PRFM.getValueOr(""), S, getContext()));
2392  return MatchOperand_Success;
2393  }
2394 
2395  if (Tok.isNot(AsmToken::Identifier)) {
2396  TokError("prefetch hint expected");
2397  return MatchOperand_ParseFail;
2398  }
2399 
2400  auto PRFM = LookupByName(Tok.getString());
2401  if (!PRFM) {
2402  TokError("prefetch hint expected");
2403  return MatchOperand_ParseFail;
2404  }
2405 
2406  Parser.Lex(); // Eat identifier token.
2407  Operands.push_back(AArch64Operand::CreatePrefetch(
2408  *PRFM, Tok.getString(), S, getContext()));
2409  return MatchOperand_Success;
2410 }
2411 
2412 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2413 OperandMatchResultTy
2414 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2415  MCAsmParser &Parser = getParser();
2416  SMLoc S = getLoc();
2417  const AsmToken &Tok = Parser.getTok();
2418  if (Tok.isNot(AsmToken::Identifier)) {
2419  TokError("invalid operand for instruction");
2420  return MatchOperand_ParseFail;
2421  }
2422 
2423  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2424  if (!PSB) {
2425  TokError("invalid operand for instruction");
2426  return MatchOperand_ParseFail;
2427  }
2428 
2429  Parser.Lex(); // Eat identifier token.
2430  Operands.push_back(AArch64Operand::CreatePSBHint(
2431  PSB->Encoding, Tok.getString(), S, getContext()));
2432  return MatchOperand_Success;
2433 }
2434 
2435 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2436 OperandMatchResultTy
2437 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2438  MCAsmParser &Parser = getParser();
2439  SMLoc S = getLoc();
2440  const AsmToken &Tok = Parser.getTok();
2441  if (Tok.isNot(AsmToken::Identifier)) {
2442  TokError("invalid operand for instruction");
2443  return MatchOperand_ParseFail;
2444  }
2445 
2446  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2447  if (!BTI) {
2448  TokError("invalid operand for instruction");
2449  return MatchOperand_ParseFail;
2450  }
2451 
2452  Parser.Lex(); // Eat identifier token.
2453  Operands.push_back(AArch64Operand::CreateBTIHint(
2454  BTI->Encoding, Tok.getString(), S, getContext()));
2455  return MatchOperand_Success;
2456 }
2457 
2458 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2459 /// instruction.
2460 OperandMatchResultTy
2461 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2462  MCAsmParser &Parser = getParser();
2463  SMLoc S = getLoc();
2464  const MCExpr *Expr = nullptr;
2465 
2466  if (Parser.getTok().is(AsmToken::Hash)) {
2467  Parser.Lex(); // Eat hash token.
2468  }
2469 
2470  if (parseSymbolicImmVal(Expr))
2471  return MatchOperand_ParseFail;
2472 
2473  AArch64MCExpr::VariantKind ELFRefKind;
2474  MCSymbolRefExpr::VariantKind DarwinRefKind;
2475  int64_t Addend;
2476  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2477  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2478  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2479  // No modifier was specified at all; this is the syntax for an ELF basic
2480  // ADRP relocation (unfortunately).
2481  Expr =
2482  AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2483  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2484  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2485  Addend != 0) {
2486  Error(S, "gotpage label reference not allowed an addend");
2487  return MatchOperand_ParseFail;
2488  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2489  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2490  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2491  ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2492  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2493  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2494  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2495  // The operand must be an @page or @gotpage qualified symbolref.
2496  Error(S, "page or gotpage label reference expected");
2497  return MatchOperand_ParseFail;
2498  }
2499  }
2500 
2501  // We have either a label reference possibly with addend or an immediate. The
2502  // addend is a raw value here. The linker will adjust it to only reference the
2503  // page.
2504  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2505  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2506 
2507  return MatchOperand_Success;
2508 }
2509 
2510 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2511 /// instruction.
2512 OperandMatchResultTy
2513 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2514  SMLoc S = getLoc();
2515  const MCExpr *Expr = nullptr;
2516 
2517  // Leave anything with a bracket to the default for SVE
2518  if (getParser().getTok().is(AsmToken::LBrac))
2519  return MatchOperand_NoMatch;
2520 
2521  if (getParser().getTok().is(AsmToken::Hash))
2522  getParser().Lex(); // Eat hash token.
2523 
2524  if (parseSymbolicImmVal(Expr))
2525  return MatchOperand_ParseFail;
2526 
2527  AArch64MCExpr::VariantKind ELFRefKind;
2528  MCSymbolRefExpr::VariantKind DarwinRefKind;
2529  int64_t Addend;
2530  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2531  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2532  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2533  // No modifier was specified at all; this is the syntax for an ELF basic
2534  // ADR relocation (unfortunately).
2535  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2536  } else {
2537  Error(S, "unexpected adr label");
2538  return MatchOperand_ParseFail;
2539  }
2540  }
2541 
2542  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2543  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2544  return MatchOperand_Success;
2545 }
2546 
2547 /// tryParseFPImm - A floating point immediate expression operand.
2548 template<bool AddFPZeroAsLiteral>
2549 OperandMatchResultTy
2550 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2551  MCAsmParser &Parser = getParser();
2552  SMLoc S = getLoc();
2553 
2554  bool Hash = parseOptionalToken(AsmToken::Hash);
2555 
2556  // Handle negation, as that still comes through as a separate token.
2557  bool isNegative = parseOptionalToken(AsmToken::Minus);
2558 
2559  const AsmToken &Tok = Parser.getTok();
2560  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2561  if (!Hash)
2562  return MatchOperand_NoMatch;
2563  TokError("invalid floating point immediate");
2564  return MatchOperand_ParseFail;
2565  }
2566 
2567  // Parse hexadecimal representation.
2568  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2569  if (Tok.getIntVal() > 255 || isNegative) {
2570  TokError("encoded floating point value out of range");
2571  return MatchOperand_ParseFail;
2572  }
2573 
2574  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2575  Operands.push_back(
2576  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2577  } else {
2578  // Parse FP representation.
2579  APFloat RealVal(APFloat::IEEEdouble());
2580  auto Status =
2581  RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2582  if (isNegative)
2583  RealVal.changeSign();
2584 
2585  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2586  Operands.push_back(
2587  AArch64Operand::CreateToken("#0", false, S, getContext()));
2588  Operands.push_back(
2589  AArch64Operand::CreateToken(".0", false, S, getContext()));
2590  } else
2591  Operands.push_back(AArch64Operand::CreateFPImm(
2592  RealVal, Status == APFloat::opOK, S, getContext()));
2593  }
2594 
2595  Parser.Lex(); // Eat the token.
2596 
2597  return MatchOperand_Success;
2598 }
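// Editorial example: "fmov d0, #0x70" takes the hexadecimal path above and
// decodes the 8-bit encoding 0x70 to 1.0 (always exact), whereas
// "fmov d0, #1.5" takes the textual path and is marked exact only when
// convertFromString returns opOK.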
2599 
2600 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2601 /// a shift suffix, for example '#1, lsl #12'.
2602 OperandMatchResultTy
2603 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2604  MCAsmParser &Parser = getParser();
2605  SMLoc S = getLoc();
2606 
2607  if (Parser.getTok().is(AsmToken::Hash))
2608  Parser.Lex(); // Eat '#'
2609  else if (Parser.getTok().isNot(AsmToken::Integer))
2610  // Operand should start from # or should be integer, emit error otherwise.
2611  return MatchOperand_NoMatch;
2612 
2613  const MCExpr *Imm = nullptr;
2614  if (parseSymbolicImmVal(Imm))
2615  return MatchOperand_ParseFail;
2616  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2617  SMLoc E = Parser.getTok().getLoc();
2618  Operands.push_back(
2619  AArch64Operand::CreateImm(Imm, S, E, getContext()));
2620  return MatchOperand_Success;
2621  }
2622 
2623  // Eat ','
2624  Parser.Lex();
2625 
2626  // The optional operand must be "lsl #N" where N is non-negative.
2627  if (!Parser.getTok().is(AsmToken::Identifier) ||
2628  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2629  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2630  return MatchOperand_ParseFail;
2631  }
2632 
2633  // Eat 'lsl'
2634  Parser.Lex();
2635 
2636  parseOptionalToken(AsmToken::Hash);
2637 
2638  if (Parser.getTok().isNot(AsmToken::Integer)) {
2639  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2640  return MatchOperand_ParseFail;
2641  }
2642 
2643  int64_t ShiftAmount = Parser.getTok().getIntVal();
2644 
2645  if (ShiftAmount < 0) {
2646  Error(Parser.getTok().getLoc(), "positive shift amount required");
2647  return MatchOperand_ParseFail;
2648  }
2649  Parser.Lex(); // Eat the number
2650 
2651  // Just in case the optional lsl #0 is used for immediates other than zero.
2652  if (ShiftAmount == 0 && Imm != nullptr) {
2653  SMLoc E = Parser.getTok().getLoc();
2654  Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2655  return MatchOperand_Success;
2656  }
2657 
2658  SMLoc E = Parser.getTok().getLoc();
2659  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2660  S, E, getContext()));
2661  return MatchOperand_Success;
2662 }
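// Editorial example: "#1, lsl #12" becomes a ShiftedImm operand (value 1,
// shift 12), a bare "#4096" stays a plain immediate, and "#5, lsl #0" is
// folded back into the plain immediate 5 by the ShiftAmount == 0 special
// case above.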
2663 
2664 /// parseCondCodeString - Parse a Condition Code string.
2665 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2666  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2667  .Case("eq", AArch64CC::EQ)
2668  .Case("ne", AArch64CC::NE)
2669  .Case("cs", AArch64CC::HS)
2670  .Case("hs", AArch64CC::HS)
2671  .Case("cc", AArch64CC::LO)
2672  .Case("lo", AArch64CC::LO)
2673  .Case("mi", AArch64CC::MI)
2674  .Case("pl", AArch64CC::PL)
2675  .Case("vs", AArch64CC::VS)
2676  .Case("vc", AArch64CC::VC)
2677  .Case("hi", AArch64CC::HI)
2678  .Case("ls", AArch64CC::LS)
2679  .Case("ge", AArch64CC::GE)
2680  .Case("lt", AArch64CC::LT)
2681  .Case("gt", AArch64CC::GT)
2682  .Case("le", AArch64CC::LE)
2683  .Case("al", AArch64CC::AL)
2684  .Case("nv", AArch64CC::NV)
2685  .Default(AArch64CC::Invalid);
2686 
2687  if (CC == AArch64CC::Invalid &&
2688  getSTI().getFeatureBits()[AArch64::FeatureSVE])
2689  CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2690  .Case("none", AArch64CC::EQ)
2691  .Case("any", AArch64CC::NE)
2692  .Case("nlast", AArch64CC::HS)
2693  .Case("last", AArch64CC::LO)
2694  .Case("first", AArch64CC::MI)
2695  .Case("nfrst", AArch64CC::PL)
2696  .Case("pmore", AArch64CC::HI)
2697  .Case("plast", AArch64CC::LS)
2698  .Case("tcont", AArch64CC::GE)
2699  .Case("tstop", AArch64CC::LT)
2700  .Default(AArch64CC::Invalid);
2701 
2702  return CC;
2703 }
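// Editorial note: the SVE names are pure aliases for existing NZCV
// encodings ("none" == "eq", "any" == "ne", and so on), so SVE-predicated
// code reuses the integer condition codes rather than introducing new ones.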
2704 
2705 /// parseCondCode - Parse a Condition Code operand.
2706 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2707  bool invertCondCode) {
2708  MCAsmParser &Parser = getParser();
2709  SMLoc S = getLoc();
2710  const AsmToken &Tok = Parser.getTok();
2711  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2712 
2713  StringRef Cond = Tok.getString();
2714  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2715  if (CC == AArch64CC::Invalid)
2716  return TokError("invalid condition code");
2717  Parser.Lex(); // Eat identifier token.
2718 
2719  if (invertCondCode) {
2720  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2721  return TokError("condition codes AL and NV are invalid for this instruction");
2722  CC = AArch64CC::getInvertedCondCode(CC);
2723  }
2724 
2725  Operands.push_back(
2726  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2727  return false;
2728 }
2729 
2730 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2731 /// them if present.
2732 OperandMatchResultTy
2733 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2734  MCAsmParser &Parser = getParser();
2735  const AsmToken &Tok = Parser.getTok();
2736  std::string LowerID = Tok.getString().lower();
2737  AArch64_AM::ShiftExtendType ShOp =
2738  StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2739  .Case("lsl", AArch64_AM::LSL)
2740  .Case("lsr", AArch64_AM::LSR)
2741  .Case("asr", AArch64_AM::ASR)
2742  .Case("ror", AArch64_AM::ROR)
2743  .Case("msl", AArch64_AM::MSL)
2744  .Case("uxtb", AArch64_AM::UXTB)
2745  .Case("uxth", AArch64_AM::UXTH)
2746  .Case("uxtw", AArch64_AM::UXTW)
2747  .Case("uxtx", AArch64_AM::UXTX)
2748  .Case("sxtb", AArch64_AM::SXTB)
2749  .Case("sxth", AArch64_AM::SXTH)
2750  .Case("sxtw", AArch64_AM::SXTW)
2751  .Case("sxtx", AArch64_AM::SXTX)
2752  .Default(AArch64_AM::InvalidShiftExtend);
2753 
2754  if (ShOp == AArch64_AM::InvalidShiftExtend)
2755  return MatchOperand_NoMatch;
2756 
2757  SMLoc S = Tok.getLoc();
2758  Parser.Lex();
2759 
2760  bool Hash = parseOptionalToken(AsmToken::Hash);
2761 
2762  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2763  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2764  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2765  ShOp == AArch64_AM::MSL) {
2766  // We expect a number here.
2767  TokError("expected #imm after shift specifier");
2768  return MatchOperand_ParseFail;
2769  }
2770 
2771  // "extend" type operations don't need an immediate, #0 is implicit.
2772  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2773  Operands.push_back(
2774  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2775  return MatchOperand_Success;
2776  }
2777 
2778  // Make sure we do actually have a number, identifier or a parenthesized
2779  // expression.
2780  SMLoc E = Parser.getTok().getLoc();
2781  if (!Parser.getTok().is(AsmToken::Integer) &&
2782  !Parser.getTok().is(AsmToken::LParen) &&
2783  !Parser.getTok().is(AsmToken::Identifier)) {
2784  Error(E, "expected integer shift amount");
2785  return MatchOperand_ParseFail;
2786  }
2787 
2788  const MCExpr *ImmVal;
2789  if (getParser().parseExpression(ImmVal))
2790  return MatchOperand_ParseFail;
2791 
2792  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2793  if (!MCE) {
2794  Error(E, "expected constant '#imm' after shift specifier");
2795  return MatchOperand_ParseFail;
2796  }
2797 
2798  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2799  Operands.push_back(AArch64Operand::CreateShiftExtend(
2800  ShOp, MCE->getValue(), true, S, E, getContext()));
2801  return MatchOperand_Success;
2802 }
2803 
2804 static const struct Extension {
2805  const char *Name;
2806  const FeatureBitset Features;
2807 } ExtensionMap[] = {
2808  {"crc", {AArch64::FeatureCRC}},
2809  {"sm4", {AArch64::FeatureSM4}},
2810  {"sha3", {AArch64::FeatureSHA3}},
2811  {"sha2", {AArch64::FeatureSHA2}},
2812  {"aes", {AArch64::FeatureAES}},
2813  {"crypto", {AArch64::FeatureCrypto}},
2814  {"fp", {AArch64::FeatureFPARMv8}},
2815  {"simd", {AArch64::FeatureNEON}},
2816  {"ras", {AArch64::FeatureRAS}},
2817  {"lse", {AArch64::FeatureLSE}},
2818  {"predres", {AArch64::FeaturePredRes}},
2819  {"ccdp", {AArch64::FeatureCacheDeepPersist}},
2820  {"mte", {AArch64::FeatureMTE}},
2821  {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
2822  {"pan-rwv", {AArch64::FeaturePAN_RWV}},
2823  {"ccpp", {AArch64::FeatureCCPP}},
2824  {"sve", {AArch64::FeatureSVE}},
2825  {"sve2", {AArch64::FeatureSVE2}},
2826  {"sve2-aes", {AArch64::FeatureSVE2AES}},
2827  {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
2828  {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
2829  {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
2830  // FIXME: Unsupported extensions
2831  {"pan", {}},
2832  {"lor", {}},
2833  {"rdma", {}},
2834  {"profile", {}},
2835 };
2836 
2837 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2838  if (FBS[AArch64::HasV8_1aOps])
2839  Str += "ARMv8.1a";
2840  else if (FBS[AArch64::HasV8_2aOps])
2841  Str += "ARMv8.2a";
2842  else if (FBS[AArch64::HasV8_3aOps])
2843  Str += "ARMv8.3a";
2844  else if (FBS[AArch64::HasV8_4aOps])
2845  Str += "ARMv8.4a";
2846  else if (FBS[AArch64::HasV8_5aOps])
2847  Str += "ARMv8.5a";
2848  else {
2849  auto ext = std::find_if(std::begin(ExtensionMap),
2850  std::end(ExtensionMap),
2851  [&](const Extension& e)
2852  // Use & in case multiple features are enabled
2853  { return (FBS & e.Features) != FeatureBitset(); }
2854  );
2855 
2856  Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2857  }
2858 }
2859 
2860 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2861  SMLoc S) {
2862  const uint16_t Op2 = Encoding & 7;
2863  const uint16_t Cm = (Encoding & 0x78) >> 3;
2864  const uint16_t Cn = (Encoding & 0x780) >> 7;
2865  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2866 
2867  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2868 
2869  Operands.push_back(
2870  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2871  Operands.push_back(
2872  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2873  Operands.push_back(
2874  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2875  Expr = MCConstantExpr::create(Op2, getContext());
2876  Operands.push_back(
2877  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2878 }
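// Editorial example of the bitfield split above: IC IALLU has op1=0, Cn=7,
// Cm=5, op2=0, i.e. Encoding = (0 << 11) | (7 << 7) | (5 << 3) | 0 = 0x3a8,
// so "ic iallu" is assembled as if written "sys #0, c7, c5, #0".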
2879 
2880 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2881 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2882 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2883  OperandVector &Operands) {
2884  if (Name.find('.') != StringRef::npos)
2885  return TokError("invalid operand");
2886 
2887  Mnemonic = Name;
2888  Operands.push_back(
2889  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2890 
2891  MCAsmParser &Parser = getParser();
2892  const AsmToken &Tok = Parser.getTok();
2893  StringRef Op = Tok.getString();
2894  SMLoc S = Tok.getLoc();
2895 
2896  if (Mnemonic == "ic") {
2897  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2898  if (!IC)
2899  return TokError("invalid operand for IC instruction");
2900  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2901  std::string Str("IC " + std::string(IC->Name) + " requires ");
2902  setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2903  return TokError(Str.c_str());
2904  }
2905  createSysAlias(IC->Encoding, Operands, S);
2906  } else if (Mnemonic == "dc") {
2907  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2908  if (!DC)
2909  return TokError("invalid operand for DC instruction");
2910  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2911  std::string Str("DC " + std::string(DC->Name) + " requires ");
2912  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2913  return TokError(Str.c_str());
2914  }
2915  createSysAlias(DC->Encoding, Operands, S);
2916  } else if (Mnemonic == "at") {
2917  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2918  if (!AT)
2919  return TokError("invalid operand for AT instruction");
2920  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2921  std::string Str("AT " + std::string(AT->Name) + " requires ");
2922  setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2923  return TokError(Str.c_str());
2924  }
2925  createSysAlias(AT->Encoding, Operands, S);
2926  } else if (Mnemonic == "tlbi") {
2927  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2928  if (!TLBI)
2929  return TokError("invalid operand for TLBI instruction");
2930  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2931  std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2932  setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2933  return TokError(Str.c_str());
2934  }
2935  createSysAlias(TLBI->Encoding, Operands, S);
2936  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
2937  const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
2938  if (!PRCTX)
2939  return TokError("invalid operand for prediction restriction instruction");
2940  else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
2941  std::string Str(
2942  Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
2943  setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
2944  return TokError(Str.c_str());
2945  }
2946  uint16_t PRCTX_Op2 =
2947  Mnemonic == "cfp" ? 4 :
2948  Mnemonic == "dvp" ? 5 :
2949  Mnemonic == "cpp" ? 7 :
2950  0;
2951  assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
2952  createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2, Operands, S);
2953  }
2954 
2955  Parser.Lex(); // Eat operand.
2956 
2957  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2958  bool HasRegister = false;
2959 
2960  // Check for the optional register operand.
2961  if (parseOptionalToken(AsmToken::Comma)) {
2962  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2963  return TokError("expected register operand");
2964  HasRegister = true;
2965  }
2966 
2967  if (ExpectRegister && !HasRegister)
2968  return TokError("specified " + Mnemonic + " op requires a register");
2969  else if (!ExpectRegister && HasRegister)
2970  return TokError("specified " + Mnemonic + " op does not use a register");
2971 
2972  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2973  return true;
2974 
2975  return false;
2976 }
2977 
2978 OperandMatchResultTy
2979 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2980  MCAsmParser &Parser = getParser();
2981  const AsmToken &Tok = Parser.getTok();
2982 
2983  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2984  TokError("'csync' operand expected");
2985  return MatchOperand_ParseFail;
2986  // Can be either a #imm style literal or an option name
2987  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2988  // Immediate operand.
2989  const MCExpr *ImmVal;
2990  SMLoc ExprLoc = getLoc();
2991  if (getParser().parseExpression(ImmVal))
2992  return MatchOperand_ParseFail;
2993  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2994  if (!MCE) {
2995  Error(ExprLoc, "immediate value expected for barrier operand");
2996  return MatchOperand_ParseFail;
2997  }
2998  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2999  Error(ExprLoc, "barrier operand out of range");
3000  return MatchOperand_ParseFail;
3001  }
3002  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
3003  Operands.push_back(AArch64Operand::CreateBarrier(
3004  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
3005  return MatchOperand_Success;
3006  }
3007 
3008  if (Tok.isNot(AsmToken::Identifier)) {
3009  TokError("invalid operand for instruction");
3010  return MatchOperand_ParseFail;
3011  }
3012 
3013  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
3014  // The only valid named option for ISB is 'sy'
3015  auto DB = AArch64DB::lookupDBByName(Tok.getString());
3016  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3017  TokError("'sy' or #imm operand expected");
3018  return MatchOperand_ParseFail;
3019  // The only valid named option for TSB is 'csync'
3020  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3021  TokError("'csync' operand expected");
3022  return MatchOperand_ParseFail;
3023  } else if (!DB && !TSB) {
3024  TokError("invalid barrier option name");
3025  return MatchOperand_ParseFail;
3026  }
3027 
3028  Operands.push_back(AArch64Operand::CreateBarrier(
3029  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
3030  Parser.Lex(); // Consume the option
3031 
3032  return MatchOperand_Success;
3033 }
3034 
3035 OperandMatchResultTy
3036 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3037  MCAsmParser &Parser = getParser();
3038  const AsmToken &Tok = Parser.getTok();
3039 
3040  if (Tok.isNot(AsmToken::Identifier))
3041  return MatchOperand_NoMatch;
3042 
3043  int MRSReg, MSRReg;
3044  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3045  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3046  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3047  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3048  } else
3049  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3050 
3051  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3052  unsigned PStateImm = -1;
3053  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3054  PStateImm = PState->Encoding;
3055 
3056  Operands.push_back(
3057  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3058  PStateImm, getContext()));
3059  Parser.Lex(); // Eat identifier
3060 
3061  return MatchOperand_Success;
3062 }
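// Editorial note: a system register that is readable but not writeable
// (CurrentEL, for instance) gets MSRReg == -1 here, so using it with MSR
// fails during operand matching rather than at parse time.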
3063 
3064 /// tryParseNeonVectorRegister - Parse a vector register operand.
3065 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3066  MCAsmParser &Parser = getParser();
3067  if (Parser.getTok().isNot(AsmToken::Identifier))
3068  return true;
3069 
3070  SMLoc S = getLoc();
3071  // Check for a vector register specifier first.
3072  StringRef Kind;
3073  unsigned Reg;
3074  OperandMatchResultTy Res =
3075  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3076  if (Res != MatchOperand_Success)
3077  return true;
3078 
3079  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3080  if (!KindRes)
3081  return true;
3082 
3083  unsigned ElementWidth = KindRes->second;
3084  Operands.push_back(
3085  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3086  S, getLoc(), getContext()));
3087 
3088  // If there was an explicit qualifier, that goes on as a literal text
3089  // operand.
3090  if (!Kind.empty())
3091  Operands.push_back(
3092  AArch64Operand::CreateToken(Kind, false, S, getContext()));
3093 
3094  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3095 }
3096 
3097 OperandMatchResultTy
3098 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3099  SMLoc SIdx = getLoc();
3100  if (parseOptionalToken(AsmToken::LBrac)) {
3101  const MCExpr *ImmVal;
3102  if (getParser().parseExpression(ImmVal))
3103  return MatchOperand_NoMatch;
3104  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3105  if (!MCE) {
3106  TokError("immediate value expected for vector index");
3107  return MatchOperand_ParseFail;
3108  }
3109 
3110  SMLoc E = getLoc();
3111 
3112  if (parseToken(AsmToken::RBrac, "']' expected"))
3113  return MatchOperand_ParseFail;
3114 
3115  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3116  E, getContext()));
3117  return MatchOperand_Success;
3118  }
3119 
3120  return MatchOperand_NoMatch;
3121 }
3122 
3123 // tryParseVectorRegister - Try to parse a vector register name with
3124 // optional kind specifier. If it is a register specifier, eat the token
3125 // and return it.
3126 OperandMatchResultTy
3127 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3128  RegKind MatchKind) {
3129  MCAsmParser &Parser = getParser();
3130  const AsmToken &Tok = Parser.getTok();
3131 
3132  if (Tok.isNot(AsmToken::Identifier))
3133  return MatchOperand_NoMatch;
3134 
3135  StringRef Name = Tok.getString();
3136  // If there is a kind specifier, it's separated from the register name by
3137  // a '.'.
3138  size_t Start = 0, Next = Name.find('.');
3139  StringRef Head = Name.slice(Start, Next);
3140  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3141 
3142  if (RegNum) {
3143  if (Next != StringRef::npos) {
3144  Kind = Name.slice(Next, StringRef::npos);
3145  if (!isValidVectorKind(Kind, MatchKind)) {
3146  TokError("invalid vector kind qualifier");
3147  return MatchOperand_ParseFail;
3148  }
3149  }
3150  Parser.Lex(); // Eat the register token.
3151 
3152  Reg = RegNum;
3153  return MatchOperand_Success;
3154  }
3155 
3156  return MatchOperand_NoMatch;
3157 }
3158 
3159 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3160 OperandMatchResultTy
3161 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3162  // Check for a SVE predicate register specifier first.
3163  const SMLoc S = getLoc();
3164  StringRef Kind;
3165  unsigned RegNum;
3166  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3167  if (Res != MatchOperand_Success)
3168  return Res;
3169 
3170  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3171  if (!KindRes)
3172  return MatchOperand_NoMatch;
3173 
3174  unsigned ElementWidth = KindRes->second;
3175  Operands.push_back(AArch64Operand::CreateVectorReg(
3176  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3177  getLoc(), getContext()));
3178 
3179  // Not all predicates are followed by a '/m' or '/z'.
3180  MCAsmParser &Parser = getParser();
3181  if (Parser.getTok().isNot(AsmToken::Slash))
3182  return MatchOperand_Success;
3183 
3184  // But when they do they shouldn't have an element type suffix.
3185  if (!Kind.empty()) {
3186  Error(S, "not expecting size suffix");
3187  return MatchOperand_ParseFail;
3188  }
3189 
3190  // Add a literal slash as operand
3191  Operands.push_back(
3192  AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3193 
3194  Parser.Lex(); // Eat the slash.
3195 
3196  // Zeroing or merging?
3197  auto Pred = Parser.getTok().getString().lower();
3198  if (Pred != "z" && Pred != "m") {
3199  Error(getLoc(), "expecting 'm' or 'z' predication");
3200  return MatchOperand_ParseFail;
3201  }
3202 
3203  // Add zero/merge token.
3204  const char *ZM = Pred == "z" ? "z" : "m";
3205  Operands.push_back(
3206  AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3207 
3208  Parser.Lex(); // Eat zero/merge token.
3209  return MatchOperand_Success;
3210 }
3211 
3212 /// parseRegister - Parse a register operand.
3213 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3214  // Try for a Neon vector register.
3215  if (!tryParseNeonVectorRegister(Operands))
3216  return false;
3217 
3218  // Otherwise try for a scalar register.
3219  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3220  return false;
3221 
3222  return true;
3223 }
3224 
3225 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3226  MCAsmParser &Parser = getParser();
3227  bool HasELFModifier = false;
3228  AArch64MCExpr::VariantKind RefKind;
3229 
3230  if (parseOptionalToken(AsmToken::Colon)) {
3231  HasELFModifier = true;
3232 
3233  if (Parser.getTok().isNot(AsmToken::Identifier))
3234  return TokError("expect relocation specifier in operand after ':'");
3235 
3236  std::string LowerCase = Parser.getTok().getIdentifier().lower();
3237  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3238  .Case("lo12", AArch64MCExpr::VK_LO12)
3239  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3240  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3241  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3242  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3243  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3244  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3245  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3246  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3247  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3248  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3249  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3250  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3251  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3252  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3253  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3254  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3255  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3256  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3257  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3258  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3259  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3260  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3261  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3262  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3263  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3264  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3265  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3266  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3267  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3268  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3269  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3270  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3271  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3272  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3273  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3274  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3275  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3276  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3277  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3278  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3279  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3280  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3281  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3282  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3283  .Default(AArch64MCExpr::VK_INVALID);
3284 
3285  if (RefKind == AArch64MCExpr::VK_INVALID)
3286  return TokError("expect relocation specifier in operand after ':'");
3287 
3288  Parser.Lex(); // Eat identifier
3289 
3290  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3291  return true;
3292  }
3293 
3294  if (getParser().parseExpression(ImmVal))
3295  return true;
3296 
3297  if (HasELFModifier)
3298  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3299 
3300  return false;
3301 }
3302 
3303 template <RegKind VectorKind>
3304 OperandMatchResultTy
3305 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3306  bool ExpectMatch) {
3307  MCAsmParser &Parser = getParser();
3308  if (!Parser.getTok().is(AsmToken::LCurly))
3309  return MatchOperand_NoMatch;
3310 
3311  // Wrapper around parse function
3312  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3313  bool NoMatchIsError) {
3314  auto RegTok = Parser.getTok();
3315  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3316  if (ParseRes == MatchOperand_Success) {
3317  if (parseVectorKind(Kind, VectorKind))
3318  return ParseRes;
3319  llvm_unreachable("Expected a valid vector kind");
3320  }
3321 
3322  if (RegTok.isNot(AsmToken::Identifier) ||
3323  ParseRes == MatchOperand_ParseFail ||
3324  (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3325  Error(Loc, "vector register expected");
3326  return MatchOperand_ParseFail;
3327  }
3328 
3329  return MatchOperand_NoMatch;
3330  };
3331 
3332  SMLoc S = getLoc();
3333  auto LCurly = Parser.getTok();
3334  Parser.Lex(); // Eat left bracket token.
3335 
3336  StringRef Kind;
3337  unsigned FirstReg;
3338  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3339 
3340  // Put back the original left bracket if there was no match, so that
3341  // different types of list-operands can be matched (e.g. SVE, Neon).
3342  if (ParseRes == MatchOperand_NoMatch)
3343  Parser.getLexer().UnLex(LCurly);
3344 
3345  if (ParseRes != MatchOperand_Success)
3346  return ParseRes;
3347 
3348  int64_t PrevReg = FirstReg;
3349  unsigned Count = 1;
3350 
3351  if (parseOptionalToken(AsmToken::Minus)) {
3352  SMLoc Loc = getLoc();
3353  StringRef NextKind;
3354 
3355  unsigned Reg;
3356  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3357  if (ParseRes != MatchOperand_Success)
3358  return ParseRes;
3359 
3360  // Any Kind suffixes must match on all regs in the list.
3361  if (Kind != NextKind) {
3362  Error(Loc, "mismatched register size suffix");
3363  return MatchOperand_ParseFail;
3364  }
3365 
3366  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3367 
3368  if (Space == 0 || Space > 3) {
3369  Error(Loc, "invalid number of vectors");
3370  return MatchOperand_ParseFail;
3371  }
3372 
3373  Count += Space;
3374  }
3375  else {
3376  while (parseOptionalToken(AsmToken::Comma)) {
3377  SMLoc Loc = getLoc();
3378  StringRef NextKind;
3379  unsigned Reg;
3380  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3381  if (ParseRes != MatchOperand_Success)
3382  return ParseRes;
3383 
3384  // Any Kind suffixes must match on all regs in the list.
3385  if (Kind != NextKind) {
3386  Error(Loc, "mismatched register size suffix");
3387  return MatchOperand_ParseFail;
3388  }
3389 
3390  // Registers must be incremental (with wraparound at 31)
3391  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3392  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3393  Error(Loc, "registers must be sequential");
3394  return MatchOperand_ParseFail;
3395  }
3396 
3397  PrevReg = Reg;
3398  ++Count;
3399  }
3400  }
3401 
3402  if (parseToken(AsmToken::RCurly, "'}' expected"))
3403  return MatchOperand_ParseFail;
3404 
3405  if (Count > 4) {
3406  Error(S, "invalid number of vectors");
3407  return MatchOperand_ParseFail;
3408  }
3409 
3410  unsigned NumElements = 0;
3411  unsigned ElementWidth = 0;
3412  if (!Kind.empty()) {
3413  if (const auto &VK = parseVectorKind(Kind, VectorKind))
3414  std::tie(NumElements, ElementWidth) = *VK;
3415  }
3416 
3417  Operands.push_back(AArch64Operand::CreateVectorList(
3418  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3419  getContext()));
3420 
3421  return MatchOperand_Success;
3422 }
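// For example, each of the following forms a single vector-list operand:
//
//   { v0.4s, v1.4s, v2.4s }   // comma-separated, sequential registers
//   { v0.4s - v3.4s }         // range form: Space = 3, so Count = 4
//   { v31.2d, v0.2d }         // encoding wraps around from 31 to 0
//
// Lists longer than four registers, or with mismatched suffixes such as
// { v0.4s, v1.2d }, are rejected by the checks above.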
3423 
3424 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3425 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3426  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3427  if (ParseRes != MatchOperand_Success)
3428  return true;
3429 
3430  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3431 }
3432 
3433 OperandMatchResultTy
3434 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3435  SMLoc StartLoc = getLoc();
3436 
3437  unsigned RegNum;
3438  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3439  if (Res != MatchOperand_Success)
3440  return Res;
3441 
3442  if (!parseOptionalToken(AsmToken::Comma)) {
3443  Operands.push_back(AArch64Operand::CreateReg(
3444  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3445  return MatchOperand_Success;
3446  }
3447 
3448  parseOptionalToken(AsmToken::Hash);
3449 
3450  if (getParser().getTok().isNot(AsmToken::Integer)) {
3451  Error(getLoc(), "index must be absent or #0");
3452  return MatchOperand_ParseFail;
3453  }
3454 
3455  const MCExpr *ImmVal;
3456  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3457  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3458  Error(getLoc(), "index must be absent or #0");
3459  return MatchOperand_ParseFail;
3460  }
3461 
3462  Operands.push_back(AArch64Operand::CreateReg(
3463  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3464  return MatchOperand_Success;
3465 }
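// In other words, both "x1" and "x1, #0" (or "x1, 0") are accepted here and
// produce the same single register operand; any non-zero index such as
// "x1, #8" is rejected with "index must be absent or #0".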
3466 
3467 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3468 OperandMatchResultTy
3469 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3470  SMLoc StartLoc = getLoc();
3471 
3472  unsigned RegNum;
3473  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3474  if (Res != MatchOperand_Success)
3475  return Res;
3476 
3477  // No shift/extend is the default.
3478  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3479  Operands.push_back(AArch64Operand::CreateReg(
3480  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3481  return MatchOperand_Success;
3482  }
3483 
3484  // Eat the comma
3485  getParser().Lex();
3486 
3487  // Match the shift
3488  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3489  Res = tryParseOptionalShiftExtend(ExtOpnd);
3490  if (Res != MatchOperand_Success)
3491  return Res;
3492 
3493  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3494  Operands.push_back(AArch64Operand::CreateReg(
3495  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3496  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3497  Ext->hasShiftExtendAmount()));
3498 
3499  return MatchOperand_Success;
3500 }
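// For example, when ParseShiftExtend is enabled, "x1" yields a plain
// register operand, while "x1, lsl #3" folds the shift into the register
// operand, recording the extend type, the amount, and whether an amount
// was explicitly given.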
3501 
3502 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3503  MCAsmParser &Parser = getParser();
3504 
3505  // Some SVE instructions have a decoration after the immediate, e.g.
3506  // "mul vl". We parse them here and add tokens, which must be present in
3507  // the asm string of the tablegen instruction.
3508  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3509  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3510  if (!Parser.getTok().getString().equals_lower("mul") ||
3511  !(NextIsVL || NextIsHash))
3512  return true;
3513 
3514  Operands.push_back(
3515  AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3516  Parser.Lex(); // Eat the "mul"
3517 
3518  if (NextIsVL) {
3519  Operands.push_back(
3520  AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3521  Parser.Lex(); // Eat the "vl"
3522  return false;
3523  }
3524 
3525  if (NextIsHash) {
3526  Parser.Lex(); // Eat the #
3527  SMLoc S = getLoc();
3528 
3529  // Parse immediate operand.
3530  const MCExpr *ImmVal;
3531  if (!Parser.parseExpression(ImmVal))
3532  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3533  Operands.push_back(AArch64Operand::CreateImm(
3534  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3535  getContext()));
3536  return false; // Parsed the "mul #<imm>" operand successfully.
3537  }
3538  }
3539 
3540  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3541 }
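// For example, SVE assembly uses these decorations in forms such as:
//
//   ld1d { z0.d }, p0/z, [x0, #1, mul vl]   // "mul vl" decoration
//   cntd x0, all, mul #4                    // "mul #<imm>" multiplier
//
// "mul" and "vl" are pushed as plain tokens so they can be matched
// literally against the instruction's asm string from tablegen.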
3542 
3543 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
3544 /// the operand regardless of the mnemonic.
3545 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3546  bool invertCondCode) {
3547  MCAsmParser &Parser = getParser();
3548 
3549  OperandMatchResultTy ResTy =
3550  MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3551 
3552  // Check if the current operand has a custom associated parser; if so, try to
3553  // custom parse the operand, or fall back to the general approach.
3554  if (ResTy == MatchOperand_Success)
3555  return false;
3556  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3557  // there was a match, but an error occurred, in which case, just return that
3558  // the operand parsing failed.
3559  if (ResTy == MatchOperand_ParseFail)
3560  return true;
3561 
3562  // Nothing custom, so do general case parsing.
3563  SMLoc S, E;
3564  switch (getLexer().getKind()) {
3565  default: {
3566  SMLoc S = getLoc();
3567  const MCExpr *Expr;
3568  if (parseSymbolicImmVal(Expr))
3569  return Error(S, "invalid operand");
3570 
3571  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3572  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3573  return false;
3574  }
3575  case AsmToken::LBrac: {
3576  SMLoc Loc = Parser.getTok().getLoc();
3577  Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3578  getContext()));
3579  Parser.Lex(); // Eat '['
3580 
3581  // There's no comma after a '[', so we can parse the next operand
3582  // immediately.
3583  return parseOperand(Operands, false, false);
3584  }
3585  case AsmToken::LCurly:
3586  return parseNeonVectorList(Operands);
3587  case AsmToken::Identifier: {
3588  // If we're expecting a Condition Code operand, then just parse that.
3589  if (isCondCode)
3590  return parseCondCode(Operands, invertCondCode);
3591 
3592  // If it's a register name, parse it.
3593  if (!parseRegister(Operands))
3594  return false;
3595 
3596  // See if this is a "mul vl" decoration or "mul #<int>" operand used
3597  // by SVE instructions.
3598  if (!parseOptionalMulOperand(Operands))
3599  return false;
3600 
3601  // This could be an optional "shift" or "extend" operand.
3602  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3603  // We can only continue if no tokens were eaten.
3604  if (GotShift != MatchOperand_NoMatch)
3605  return GotShift;
3606 
3607  // This was not a register so parse other operands that start with an
3608  // identifier (like labels) as expressions and create them as immediates.
3609  const MCExpr *IdVal;
3610  S = getLoc();
3611  if (getParser().parseExpression(IdVal))
3612  return true;
3613  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3614  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3615  return false;
3616  }
3617  case AsmToken::Integer:
3618  case AsmToken::Real:
3619  case AsmToken::Hash: {
3620  // #42 -> immediate.
3621  S = getLoc();
3622 
3623  parseOptionalToken(AsmToken::Hash);
3624 
3625  // Parse a negative sign
3626  bool isNegative = false;
3627  if (Parser.getTok().is(AsmToken::Minus)) {
3628  isNegative = true;
3629  // We need to consume this token only when we have a Real, otherwise
3630  // we let parseSymbolicImmVal take care of it
3631  if (Parser.getLexer().peekTok().is(AsmToken::Real))
3632  Parser.Lex();
3633  }
3634 
3635  // The only Real that should come through here is a literal #0.0 for
3636  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3637  // so convert the value.
3638  const AsmToken &Tok = Parser.getTok();
3639  if (Tok.is(AsmToken::Real)) {
3640  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3641  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3642  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3643  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3644  Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3645  return TokError("unexpected floating point literal");
3646  else if (IntVal != 0 || isNegative)
3647  return TokError("expected floating-point constant #0.0");
3648  Parser.Lex(); // Eat the token.
3649 
3650  Operands.push_back(
3651  AArch64Operand::CreateToken("#0", false, S, getContext()));
3652  Operands.push_back(
3653  AArch64Operand::CreateToken(".0", false, S, getContext()));
3654  return false;
3655  }
3656 
3657  const MCExpr *ImmVal;
3658  if (parseSymbolicImmVal(ImmVal))
3659  return true;
3660 
3661  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3662  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3663  return false;
3664  }
3665  case AsmToken::Equal: {
3666  SMLoc Loc = getLoc();
3667  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3668  return TokError("unexpected token in operand");
3669  Parser.Lex(); // Eat '='
3670  const MCExpr *SubExprVal;
3671  if (getParser().parseExpression(SubExprVal))
3672  return true;
3673 
3674  if (Operands.size() < 2 ||
3675  !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3676  return Error(Loc, "Only valid when first operand is register");
3677 
3678  bool IsXReg =
3679  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3680  Operands[1]->getReg());
3681 
3682  MCContext& Ctx = getContext();
3683  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3684  // If the op is an imm and can fit into a mov, then replace ldr with mov.
3685  if (isa<MCConstantExpr>(SubExprVal)) {
3686  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3687  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3688  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3689  ShiftAmt += 16;
3690  Imm >>= 16;
3691  }
3692  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3693  Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3694  Operands.push_back(AArch64Operand::CreateImm(
3695  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3696  if (ShiftAmt)
3697  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3698  ShiftAmt, true, S, E, Ctx));
3699  return false;
3700  }
3701  APInt Simm = APInt(64, Imm << ShiftAmt);
3702  // check if the immediate is an unsigned or signed 32-bit int for W regs
3703  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3704  return Error(Loc, "Immediate too large for register");
3705  }
3706  // If it is a label or an imm that cannot fit in a movz, put it into CP.
3707  const MCExpr *CPLoc =
3708  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3709  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3710  return false;
3711  }
3712  }
3713 }
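// A sketch of the "ldr <reg>, =<value>" handling above: an immediate that
// reduces to one 16-bit chunk becomes a movz, anything else is placed in
// the literal pool. For example:
//
//   ldr x0, =0x12340000   // -> movz x0, #0x1234, lsl #16
//   ldr w0, =0x12345678   // -> ldr w0, <constant pool entry>
//
// The shift is found by stripping trailing 16-bit zero chunks, up to
// lsl #48 for X registers and lsl #16 for W registers.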
3714 
3715 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3716  const MCParsedAsmOperand &Op2) const {
3717  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3718  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3719  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3720  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3721  return MCTargetAsmParser::regsEqual(Op1, Op2);
3722 
3723  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3724  "Testing equality of non-scalar registers not supported");
3725 
3726  // Check if the registers match their sub/super register classes.
3727  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3728  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3729  if (AOp1.getRegEqualityTy() == EqualsSubReg)
3730  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3731  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3732  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3733  if (AOp2.getRegEqualityTy() == EqualsSubReg)
3734  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3735 
3736  return false;
3737 }
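// For example, under an EqualsSuperReg constraint the tied pair (w8, x8)
// compares equal, because x8 is the 64-bit super-register of w8, while
// (w8, x9) does not; plain EqualsReg keeps exact register identity.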
3738 
3739 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3740 /// operands.
3741 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3742  StringRef Name, SMLoc NameLoc,
3743  OperandVector &Operands) {
3744  MCAsmParser &Parser = getParser();
3745  Name = StringSwitch<StringRef>(Name.lower())
3746  .Case("beq", "b.eq")
3747  .Case("bne", "b.ne")
3748  .Case("bhs", "b.hs")
3749  .Case("bcs", "b.cs")
3750  .Case("blo", "b.lo")
3751  .Case("bcc", "b.cc")
3752  .Case("bmi", "b.mi")
3753  .Case("bpl", "b.pl")
3754  .Case("bvs", "b.vs")
3755  .Case("bvc", "b.vc")
3756  .Case("bhi", "b.hi")
3757  .Case("bls", "b.ls")
3758  .Case("bge", "b.ge")
3759  .Case("blt", "b.lt")
3760  .Case("bgt", "b.gt")
3761  .Case("ble", "b.le")
3762  .Case("bal", "b.al")
3763  .Case("bnv", "b.nv")
3764  .Default(Name);
3765 
3766  // First check for the AArch64-specific .req directive.
3767  if (Parser.getTok().is(AsmToken::Identifier) &&
3768  Parser.getTok().getIdentifier() == ".req") {
3769  parseDirectiveReq(Name, NameLoc);
3770  // We always return 'error' for this, as we're done with this
3771  // statement and don't need to match the instruction.
3772  return true;
3773  }
3774 
3775  // Create the leading tokens for the mnemonic, split by '.' characters.
3776  size_t Start = 0, Next = Name.find('.');
3777  StringRef Head = Name.slice(Start, Next);
3778 
3779  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
3780  // the SYS instruction.
3781  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
3782  Head == "cfp" || Head == "dvp" || Head == "cpp")
3783  return parseSysAlias(Head, NameLoc, Operands);
3784 
3785  Operands.push_back(
3786  AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3787  Mnemonic = Head;
3788 
3789  // Handle condition codes for a branch mnemonic
3790  if (Head == "b" && Next != StringRef::npos) {
3791  Start = Next;
3792  Next = Name.find('.', Start + 1);
3793  Head = Name.slice(Start + 1, Next);
3794 
3795  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3796  (Head.data() - Name.data()));
3797  AArch64CC::CondCode CC = parseCondCodeString(Head);
3798  if (CC == AArch64CC::Invalid)
3799  return Error(SuffixLoc, "invalid condition code");
3800  Operands.push_back(
3801  AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3802  Operands.push_back(
3803  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3804  }
3805 
3806  // Add the remaining tokens in the mnemonic.
3807  while (Next != StringRef::npos) {
3808  Start = Next;
3809  Next = Name.find('.', Start + 1);
3810  Head = Name.slice(Start, Next);
3811  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3812  (Head.data() - Name.data()) + 1);
3813  Operands.push_back(
3814  AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3815  }
3816 
3817  // Conditional compare instructions have a Condition Code operand, which needs
3818  // to be parsed and an immediate operand created.
3819  bool condCodeFourthOperand =
3820  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3821  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3822  Head == "csinc" || Head == "csinv" || Head == "csneg");
3823 
3824  // These instructions are aliases to some of the conditional select
3825  // instructions. However, the condition code is inverted in the aliased
3826  // instruction.
3827  //
3828  // FIXME: Is this the correct way to handle these? Or should the parser
3829  // generate the aliased instructions directly?
3830  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3831  bool condCodeThirdOperand =
3832  (Head == "cinc" || Head == "cinv" || Head == "cneg");
3833 
3834  // Read the remaining operands.
3835  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3836 
3837  unsigned N = 1;
3838  do {
3839  // Parse and remember the operand.
3840  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3841  (N == 3 && condCodeThirdOperand) ||
3842  (N == 2 && condCodeSecondOperand),
3843  condCodeSecondOperand || condCodeThirdOperand)) {
3844  return true;
3845  }
3846 
3847  // After successfully parsing some operands there are two special cases to
3848  // consider (i.e. notional operands not separated by commas). Both are due
3849  // to memory specifiers:
3850  // + An RBrac will end an address for load/store/prefetch
3851  // + An '!' will indicate a pre-indexed operation.
3852  //
3853  // It's someone else's responsibility to make sure these tokens are sane
3854  // in the given context!
3855 
3856  SMLoc RLoc = Parser.getTok().getLoc();
3857  if (parseOptionalToken(AsmToken::RBrac))
3858  Operands.push_back(
3859  AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3860  SMLoc ELoc = Parser.getTok().getLoc();
3861  if (parseOptionalToken(AsmToken::Exclaim))
3862  Operands.push_back(
3863  AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3864 
3865  ++N;
3866  } while (parseOptionalToken(AsmToken::Comma));
3867  }
3868 
3869  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3870  return true;
3871 
3872  return false;
3873 }
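// For example, "bne lbl" is first canonicalised to "b.ne lbl" and split at
// the '.' into the token "b", a "." token and a condition-code operand,
// while a short-form NEON mnemonic such as "fadd.2s" produces the token
// "fadd" followed by the suffix token ".2s".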
3874 
3875 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3876  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3877  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3878  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3879  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3880  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3881  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3882  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3883 }
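// That is, Z3 matches any of B3, H3, S3, D3, Q3 or Z3 itself, reflecting
// that an SVE Z register overlaps the same-numbered FP/NEON register.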
3884 
3885 // FIXME: This entire function is a giant hack to provide us with decent
3886 // operand range validation/diagnostics until TableGen/MC can be extended
3887 // to support autogeneration of this kind of validation.
3888 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3889  SmallVectorImpl<SMLoc> &Loc) {
3890  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3891  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3892 
3893  // A prefix only applies to the instruction following it. Here we extract
3894  // prefix information for the next instruction before validating the current
3895  // one so that in the case of failure we don't erroneously continue using the
3896  // current prefix.
3897  PrefixInfo Prefix = NextPrefix;
3898  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3899 
3900  // Before validating the instruction in isolation we run through the rules
3901  // applicable when it follows a prefix instruction.
3902  // NOTE: brk & hlt can be prefixed but require no additional validation.
3903  if (Prefix.isActive() &&
3904  (Inst.getOpcode() != AArch64::BRK) &&
3905  (Inst.getOpcode() != AArch64::HLT)) {
3906 
3907  // Prefixed instructions must have a destructive operand.
3908  if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3909  AArch64::NotDestructive)
3910  return Error(IDLoc, "instruction is unpredictable when following a"
3911  " movprfx, suggest replacing movprfx with mov");
3912 
3913  // Destination operands must match.
3914  if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3915  return Error(Loc[0], "instruction is unpredictable when following a"
3916  " movprfx writing to a different destination");
3917 
3918  // Destination operand must not be used in any other location.
3919  for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3920  if (Inst.getOperand(i).isReg() &&
3921  (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3922  isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3923  return Error(Loc[0], "instruction is unpredictable when following a"
3924  " movprfx and destination also used as non-destructive"
3925  " source");
3926  }
3927 
3928  auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3929  if (Prefix.isPredicated()) {
3930  int PgIdx = -1;
3931 
3932  // Find the instruction's general predicate.
3933  for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3934  if (Inst.getOperand(i).isReg() &&
3935  PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3936  PgIdx = i;
3937  break;
3938  }
3939 
3940  // Instruction must be predicated if the movprfx is predicated.
3941  if (PgIdx == -1 ||
3942  (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3943  return Error(IDLoc, "instruction is unpredictable when following a"
3944  " predicated movprfx, suggest using unpredicated movprfx");
3945 
3946  // Instruction must use same general predicate as the movprfx.
3947  if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3948  return Error(IDLoc, "instruction is unpredictable when following a"
3949  " predicated movprfx using a different general predicate");
3950 
3951  // Instruction element type must match the movprfx.
3952  if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3953  return Error(IDLoc, "instruction is unpredictable when following a"
3954  " predicated movprfx with a different element size");
3955  }
3956  }
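// For example, the following pair passes every check above:
//
//   movprfx z0.d, p0/m, z1.d
//   add     z0.d, p0/m, z0.d, z2.d   // same Zd, same Pg, same element size
//
// whereas writing to a different destination, reusing z0 as a
// non-destructive source, or changing the predicate or element size is
// diagnosed as unpredictable.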
3957 
3958  // Check for indexed addressing modes w/ the base register being the
3959  // same as a destination/source register or pair load where
3960  // the Rt == Rt2. All of those are undefined behaviour.
3961  switch (Inst.getOpcode()) {
3962  case AArch64::LDPSWpre:
3963  case AArch64::LDPWpost:
3964  case AArch64::LDPWpre:
3965  case AArch64::LDPXpost:
3966  case AArch64::LDPXpre: {
3967  unsigned Rt = Inst.getOperand(1).getReg();
3968  unsigned Rt2 = Inst.getOperand(2).getReg();
3969  unsigned Rn = Inst.getOperand(3).getReg();
3970  if (RI->isSubRegisterEq(Rn, Rt))
3971  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3972  "is also a destination");
3973  if (RI->isSubRegisterEq(Rn, Rt2))
3974  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3975  "is also a destination");
3976  break;
3977  }
3978  case AArch64::LDPDi:
3979  case AArch64::LDPQi:
3980  case AArch64::LDPSi:
3981  case AArch64::LDPSWi:
3982  case AArch64::LDPWi:
3983  case AArch64::LDPXi: {
3984  unsigned Rt = Inst.getOperand(0).getReg();
3985  unsigned Rt2 = Inst.getOperand(1).getReg();
3986  if (Rt == Rt2)
3987  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3988  break;
3989  }
3990  case AArch64::LDPDpost:
3991  case AArch64::LDPDpre:
3992  case AArch64::LDPQpost:
3993  case AArch64::LDPQpre:
3994  case AArch64::LDPSpost:
3995  case AArch64::LDPSpre:
3996  case AArch64::LDPSWpost: {
3997  unsigned Rt = Inst.getOperand(1).getReg();
3998  unsigned Rt2 = Inst.getOperand(2).getReg();
3999  if (Rt == Rt2)
4000  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4001  break;
4002  }
4003  case AArch64::STPDpost:
4004  case AArch64::STPDpre:
4005  case AArch64::STPQpost:
4006  case AArch64::STPQpre:
4007  case AArch64::STPSpost:
4008  case AArch64::STPSpre:
4009  case AArch64::STPWpost:
4010  case AArch64::STPWpre:
4011  case AArch64::STPXpost:
4012  case AArch64::STPXpre: {
4013  unsigned Rt = Inst.getOperand(1).getReg();
4014  unsigned Rt2 = Inst.getOperand(2).getReg();
4015  unsigned Rn = Inst.getOperand(3).getReg();
4016  if (RI->isSubRegisterEq(Rn, Rt))
4017  return Error(Loc[0], "unpredictable STP instruction, writeback base "
4018  "is also a source");
4019  if (RI->isSubRegisterEq(Rn, Rt2))
4020  return Error(Loc[1], "unpredictable STP instruction, writeback base "
4021  "is also a source");
4022  break;
4023  }
4024  case AArch64::LDRBBpre:
4025  case AArch64::LDRBpre:
4026  case AArch64::LDRHHpre:
4027  case AArch64::LDRHpre:
4028  case AArch64::LDRSBWpre:
4029  case AArch64::LDRSBXpre:
4030  case AArch64::LDRSHWpre:
4031  case AArch64::LDRSHXpre:
4032  case AArch64::LDRSWpre:
4033  case AArch64::LDRWpre:
4034  case AArch64::LDRXpre:
4035  case AArch64::LDRBBpost:
4036  case AArch64::LDRBpost:
4037  case AArch64::LDRHHpost:
4038  case AArch64::LDRHpost:
4039  case AArch64::LDRSBWpost:
4040  case AArch64::LDRSBXpost:
4041  case AArch64::LDRSHWpost:
4042  case AArch64::LDRSHXpost:
4043  case AArch64::LDRSWpost:
4044  case AArch64::LDRWpost:
4045  case AArch64::LDRXpost: {
4046  unsigned Rt = Inst.getOperand(1).getReg();
4047  unsigned Rn = Inst.getOperand(2).getReg();
4048  if (RI->isSubRegisterEq(Rn, Rt))
4049  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4050  "is also a source");
4051  break;
4052  }
4053  case AArch64::STRBBpost:
4054  case AArch64::STRBpost:
4055  case AArch64::STRHHpost:
4056  case AArch64::STRHpost:
4057  case AArch64::STRWpost:
4058  case AArch64::STRXpost:
4059  case AArch64::STRBBpre:
4060  case AArch64::STRBpre:
4061  case AArch64::STRHHpre:
4062  case AArch64::STRHpre:
4063  case AArch64::STRWpre:
4064  case AArch64::STRXpre: {
4065  unsigned Rt = Inst.getOperand(1).getReg();
4066  unsigned Rn = Inst.getOperand(2).getReg();
4067  if (RI->isSubRegisterEq(Rn, Rt))
4068  return Error(Loc[0], "unpredictable STR instruction, writeback base "
4069  "is also a source");
4070  break;
4071  }
4072  case AArch64::STXRB:
4073  case AArch64::STXRH:
4074  case AArch64::STXRW:
4075  case AArch64::STXRX:
4076  case AArch64::STLXRB:
4077  case AArch64::STLXRH:
4078  case AArch64::STLXRW:
4079  case AArch64::STLXRX: {
4080  unsigned Rs = Inst.getOperand(0).getReg();
4081  unsigned Rt = Inst.getOperand(1).getReg();
4082  unsigned Rn = Inst.getOperand(2).getReg();
4083  if (RI->isSubRegisterEq(Rt, Rs) ||
4084  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4085  return Error(Loc[0],
4086  "unpredictable STXR instruction, status is also a source");
4087  break;
4088  }
4089  case AArch64::STXPW:
4090  case AArch64::STXPX:
4091  case AArch64::STLXPW:
4092  case AArch64::STLXPX: {
4093  unsigned Rs = Inst.getOperand(0).getReg();
4094  unsigned Rt1 = Inst.getOperand(1).getReg();
4095  unsigned Rt2 = Inst.getOperand(2).getReg();
4096  unsigned Rn = Inst.getOperand(3).getReg();
4097  if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4098  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4099  return Error(Loc[0],
4100  "unpredictable STXP instruction, status is also a source");
4101  break;
4102  }
4103  }
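// Examples of operand combinations rejected by the checks above:
//
//   ldp  x0, x0, [x1]       // Rt2 == Rt
//   ldr  x0, [x0], #8       // writeback base overlaps transfer register
//   stxr w0, w0, [x1]       // status register is also a source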
4104 
4105 
4106  // Now check immediate ranges. Separate from the above as there is overlap
4107  // in the instructions being checked and this keeps the nested conditionals
4108  // to a minimum.
4109  switch (Inst.getOpcode()) {
4110  case AArch64::ADDSWri:
4111  case AArch64::ADDSXri:
4112  case AArch64::ADDWri:
4113  case AArch64::ADDXri:
4114  case AArch64::SUBSWri:
4115  case AArch64::SUBSXri:
4116  case AArch64::SUBWri:
4117  case AArch64::SUBXri: {
4118  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4119  // some slight duplication here.
4120  if (Inst.getOperand(2).isExpr()) {
4121  const MCExpr *Expr = Inst.getOperand(2).getExpr();
4122  AArch64MCExpr::VariantKind ELFRefKind;
4123  MCSymbolRefExpr::VariantKind DarwinRefKind;
4124  int64_t Addend;
4125  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4126 
4127  // Only allow these with ADDXri.
4128  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4129  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4130  Inst.getOpcode() == AArch64::ADDXri)
4131  return false;
4132 
4133  // Only allow these with ADDXri/ADDWri
4134  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4135  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4136  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4137  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4138  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4139  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4140  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4141  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4142  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4143  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4144  (Inst.getOpcode() == AArch64::ADDXri ||
4145  Inst.getOpcode() == AArch64::ADDWri))
4146  return false;
4147 
4148  // Otherwise, don't allow symbol refs in the immediate field.
4149  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4150  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4151  // 'cmp w0, borked')
4152  return Error(Loc.back(), "invalid immediate expression");
4153  }
4154  // We don't validate more complex expressions here
4155  }
4156  return false;
4157  }
4158  default:
4159  return false;
4160  }
4161 }
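// For the ADD/SUB immediate checks above, "add x0, x1, :lo12:sym" is
// accepted (ADDXri with a recognised modifier), while a bare symbol such
// as "add x0, x1, sym" is rejected as an invalid immediate expression.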
4162 
4163 static std::string AArch64MnemonicSpellCheck(StringRef S,
4164  const FeatureBitset &FBS,
4165  unsigned VariantID = 0);
4166 
4167 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4168  uint64_t ErrorInfo,
4169  OperandVector &Operands) {
4170  switch (ErrCode) {
4171  case Match_InvalidTiedOperand: {
4172  RegConstraintEqualityTy EqTy =
4173  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4174  .getRegEqualityTy();
4175  switch (EqTy) {
4176  case RegConstraintEqualityTy::EqualsSubReg:
4177  return Error(Loc, "operand must be 64-bit form of destination register");
4178  case RegConstraintEqualityTy::EqualsSuperReg:
4179  return Error(Loc, "operand must be 32-bit form of destination register");
4180  case RegConstraintEqualityTy::EqualsReg:
4181  return Error(Loc, "operand must match destination register");
4182  }
4183  llvm_unreachable("Unknown RegConstraintEqualityTy");
4184  }
4185  case Match_MissingFeature:
4186  return Error(Loc,
4187  "instruction requires a CPU feature not currently enabled");
4188  case Match_InvalidOperand:
4189  return Error(Loc, "invalid operand for instruction");
4190  case Match_InvalidSuffix:
4191  return Error(Loc, "invalid type suffix for instruction");
4192  case Match_InvalidCondCode:
4193  return Error(Loc, "expected AArch64 condition code");
4194  case Match_AddSubRegExtendSmall:
4195  return Error(Loc,
4196  "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4197  case Match_AddSubRegExtendLarge:
4198  return Error(Loc,
4199  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4200  case Match_AddSubSecondSource:
4201  return Error(Loc,
4202  "expected compatible register, symbol or integer in range [0, 4095]");
4203  case Match_LogicalSecondSource:
4204  return Error(Loc, "expected compatible register or logical immediate");
4205  case Match_InvalidMovImm32Shift:
4206  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4207  case Match_InvalidMovImm64Shift:
4208  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4209  case Match_AddSubRegShift32:
4210  return Error(Loc,
4211  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4212  case Match_AddSubRegShift64:
4213  return Error(Loc,
4214  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4215  case Match_InvalidFPImm:
4216  return Error(Loc,
4217  "expected compatible register or floating-point constant");
4218  case Match_InvalidMemoryIndexedSImm6:
4219  return Error(Loc, "index must be an integer in range [-32, 31].");
4220  case Match_InvalidMemoryIndexedSImm5:
4221  return Error(Loc, "index must be an integer in range [-16, 15].");
4222  case Match_InvalidMemoryIndexed1SImm4:
4223  return Error(Loc, "index must be an integer in range [-8, 7].");
4224  case Match_InvalidMemoryIndexed2SImm4:
4225  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4226  case Match_InvalidMemoryIndexed3SImm4:
4227  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4228  case Match_InvalidMemoryIndexed4SImm4:
4229  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4230  case Match_InvalidMemoryIndexed16SImm4:
4231  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4232  case Match_InvalidMemoryIndexed1SImm6:
4233  return Error(Loc, "index must be an integer in range [-32, 31].");
4234  case Match_InvalidMemoryIndexedSImm8:
4235  return Error(Loc, "index must be an integer in range [-128, 127].");
4236  case Match_InvalidMemoryIndexedSImm9:
4237  return Error(Loc, "index must be an integer in range [-256, 255].");
4238  case Match_InvalidMemoryIndexed16SImm9:
4239  return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4240  case Match_InvalidMemoryIndexed8SImm10:
4241  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4242  case Match_InvalidMemoryIndexed4SImm7:
4243  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4244  case Match_InvalidMemoryIndexed8SImm7:
4245  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4246  case Match_InvalidMemoryIndexed16SImm7:
4247  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4248  case Match_InvalidMemoryIndexed8UImm5:
4249  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4250  case Match_InvalidMemoryIndexed4UImm5:
4251  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4252  case Match_InvalidMemoryIndexed2UImm5:
4253  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4254  case Match_InvalidMemoryIndexed8UImm6:
4255  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4256  case Match_InvalidMemoryIndexed16UImm6:
4257  return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4258  case Match_InvalidMemoryIndexed4UImm6:
4259  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4260  case Match_InvalidMemoryIndexed2UImm6:
4261  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4262  case Match_InvalidMemoryIndexed1UImm6:
4263  return Error(Loc, "index must be in range [0, 63].");
4264  case Match_InvalidMemoryWExtend8:
4265  return Error(Loc,
4266  "expected 'uxtw' or 'sxtw' with optional shift of #0");
4267  case Match_InvalidMemoryWExtend16:
4268  return Error(Loc,
4269  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4270  case Match_InvalidMemoryWExtend32:
4271  return Error(Loc,
4272  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4273  case Match_InvalidMemoryWExtend64:
4274  return Error(Loc,
4275  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4276  case Match_InvalidMemoryWExtend128:
4277  return Error(Loc,
4278  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4279  case Match_InvalidMemoryXExtend8:
4280  return Error(Loc,
4281  "expected 'lsl' or 'sxtx' with optional shift of #0");
4282  case Match_InvalidMemoryXExtend16:
4283  return Error(Loc,
4284  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4285  case Match_InvalidMemoryXExtend32:
4286  return Error(Loc,
4287  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4288  case Match_InvalidMemoryXExtend64:
4289  return Error(Loc,
4290  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4291  case Match_InvalidMemoryXExtend128:
4292  return Error(Loc,
4293  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4294  case Match_InvalidMemoryIndexed1:
4295  return Error(Loc, "index must be an integer in range [0, 4095].");
4296  case Match_InvalidMemoryIndexed2:
4297  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4298  case Match_InvalidMemoryIndexed4:
4299  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4300  case Match_InvalidMemoryIndexed8:
4301  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4302  case Match_InvalidMemoryIndexed16:
4303  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4304  case Match_InvalidImm0_1:
4305  return Error(Loc, "immediate must be an integer in range [0, 1].");
4306  case Match_InvalidImm0_7:
4307  return Error(Loc, "immediate must be an integer in range [0, 7].");
4308  case Match_InvalidImm0_15:
4309  return Error(Loc, "immediate must be an integer in range [0, 15].");
4310  case Match_InvalidImm0_31:
4311  return Error(Loc, "immediate must be an integer in range [0, 31].");
4312  case Match_InvalidImm0_63:
4313  return Error(Loc, "immediate must be an integer in range [0, 63].");
4314  case Match_InvalidImm0_127:
4315  return Error(Loc, "immediate must be an integer in range [0, 127].");
4316  case Match_InvalidImm0_255:
4317  return Error(Loc, "immediate must be an integer in range [0, 255].");
4318  case Match_InvalidImm0_65535:
4319  return Error(Loc, "immediate must be an integer in range [0, 65535].");
4320  case Match_InvalidImm1_8:
4321  return Error(Loc, "immediate must be an integer in range [1, 8].");
4322  case Match_InvalidImm1_16:
4323  return Error(Loc, "immediate must be an integer in range [1, 16].");
4324  case Match_InvalidImm1_32:
4325  return Error(Loc, "immediate must be an integer in range [1, 32].");
4326  case Match_InvalidImm1_64:
4327  return Error(Loc, "immediate must be an integer in range [1, 64].");
4328  case Match_InvalidSVEAddSubImm8:
4329  return Error(Loc, "immediate must be an integer in range [0, 255]"
4330  " with a shift amount of 0");
4331  case Match_InvalidSVEAddSubImm16:
4332  case Match_InvalidSVEAddSubImm32:
4333  case Match_InvalidSVEAddSubImm64:
4334  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4335  "multiple of 256 in range [256, 65280]");
4336  case Match_InvalidSVECpyImm8:
4337  return Error(Loc, "immediate must be an integer in range [-128, 255]"
4338  " with a shift amount of 0");
4339  case Match_InvalidSVECpyImm16:
4340  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4341  "multiple of 256 in range [-32768, 65280]");
4342  case Match_InvalidSVECpyImm32:
4343  case Match_InvalidSVECpyImm64:
4344  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4345  "multiple of 256 in range [-32768, 32512]");
4346  case Match_InvalidIndexRange1_1:
4347  return Error(Loc, "expected lane specifier '[1]'");
4348  case Match_InvalidIndexRange0_15:
4349  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4350  case Match_InvalidIndexRange0_7:
4351  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4352  case Match_InvalidIndexRange0_3:
4353  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4354  case Match_InvalidIndexRange0_1:
4355  return Error(Loc, "vector lane must be an integer in range [0, 1].");
4356  case Match_InvalidSVEIndexRange0_63:
4357  return Error(Loc, "vector lane must be an integer in range [0, 63].");
4358  case Match_InvalidSVEIndexRange0_31:
4359  return Error(Loc, "vector lane must be an integer in range [0, 31].");
4360  case Match_InvalidSVEIndexRange0_15:
4361  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4362  case Match_InvalidSVEIndexRange0_7:
4363  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4364  case Match_InvalidSVEIndexRange0_3:
4365  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4366  case Match_InvalidLabel:
4367  return Error(Loc, "expected label or encodable integer pc offset");
4368  case Match_MRS:
4369  return Error(Loc, "expected readable system register");
4370  case Match_MSR:
4371  return Error(Loc, "expected writable system register or pstate");
4372  case Match_InvalidComplexRotationEven:
4373  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4374  case Match_InvalidComplexRotationOdd:
4375  return Error(Loc, "complex rotation must be 90 or 270.");
4376  case Match_MnemonicFail: {
4377  std::string Suggestion = AArch64MnemonicSpellCheck(
4378  ((AArch64Operand &)*Operands[0]).getToken(),
4379  ComputeAvailableFeatures(STI->getFeatureBits()));
4380  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4381  }
4382  case Match_InvalidGPR64shifted8:
4383  return Error(Loc, "register must be x0..x30 or xzr, without shift");
4384  case Match_InvalidGPR64shifted16:
4385  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4386  case Match_InvalidGPR64shifted32:
4387  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4388  case Match_InvalidGPR64shifted64:
4389  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4390  case Match_InvalidGPR64NoXZRshifted8:
4391  return Error(Loc, "register must be x0..x30 without shift");
4392  case Match_InvalidGPR64NoXZRshifted16:
4393  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4394  case Match_InvalidGPR64NoXZRshifted32:
4395  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4396  case Match_InvalidGPR64NoXZRshifted64:
4397  return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4398  case Match_InvalidZPR32UXTW8:
4399  case Match_InvalidZPR32SXTW8:
4400  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4401  case Match_InvalidZPR32UXTW16:
4402  case Match_InvalidZPR32SXTW16:
4403  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4404  case Match_InvalidZPR32UXTW32:
4405  case Match_InvalidZPR32SXTW32:
4406  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4407  case Match_InvalidZPR32UXTW64:
4408  case Match_InvalidZPR32SXTW64:
4409  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4410  case Match_InvalidZPR64UXTW8:
4411  case Match_InvalidZPR64SXTW8:
4412  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4413  case Match_InvalidZPR64UXTW16:
4414  case Match_InvalidZPR64SXTW16:
4415  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4416  case Match_InvalidZPR64UXTW32:
4417  case Match_InvalidZPR64SXTW32:
4418  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4419  case Match_InvalidZPR64UXTW64:
4420  case Match_InvalidZPR64SXTW64:
4421  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4422  case Match_InvalidZPR32LSL8:
4423  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4424  case Match_InvalidZPR32LSL16:
4425  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4426  case Match_InvalidZPR32LSL32:
4427  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4428  case Match_InvalidZPR32LSL64:
4429  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4430  case Match_InvalidZPR64LSL8:
4431  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4432  case Match_InvalidZPR64LSL16:
4433  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4434  case Match_InvalidZPR64LSL32:
4435  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4436  case Match_InvalidZPR64LSL64:
4437  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4438  case Match_InvalidZPR0:
4439  return Error(Loc, "expected register without element width suffix");
4440  case Match_InvalidZPR8:
4441  case Match_InvalidZPR16:
4442  case Match_InvalidZPR32:
4443  case Match_InvalidZPR64:
4444  case Match_InvalidZPR128:
4445  return Error(Loc, "invalid element width");
4446  case Match_InvalidZPR_3b8:
4447  return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4448  case Match_InvalidZPR_3b16:
4449  return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4450  case Match_InvalidZPR_3b32:
4451  return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4452  case Match_InvalidZPR_4b16:
4453  return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4454  case Match_InvalidZPR_4b32:
4455  return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4456  case Match_InvalidZPR_4b64:
4457  return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4458  case Match_InvalidSVEPattern:
4459  return Error(Loc, "invalid predicate pattern");
4460  case Match_InvalidSVEPredicateAnyReg:
4461  case Match_InvalidSVEPredicateBReg:
4462  case Match_InvalidSVEPredicateHReg:
4463  case Match_InvalidSVEPredicateSReg:
4464  case Match_InvalidSVEPredicateDReg:
4465  return Error(Loc, "invalid predicate register.");
4466  case Match_InvalidSVEPredicate3bAnyReg:
4467  return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
4468  case Match_InvalidSVEPredicate3bBReg:
4469  return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
4470  case Match_InvalidSVEPredicate3bHReg:
4471  return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
4472  case Match_InvalidSVEPredicate3bSReg:
4473  return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
4474  case Match_InvalidSVEPredicate3bDReg:
4475  return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
4476  case Match_InvalidSVEExactFPImmOperandHalfOne:
4477  return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4478  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4479  return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4480  case Match_InvalidSVEExactFPImmOperandZeroOne:
4481  return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4482  default:
4483  llvm_unreachable("unexpected error code!");
4484  }
4485 }
4486 
4487 static const char *getSubtargetFeatureName(uint64_t Val);
4488 
4489 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4490  OperandVector &Operands,
4491  MCStreamer &Out,
4492  uint64_t &ErrorInfo,
4493  bool MatchingInlineAsm) {
4494  assert(!Operands.empty() && "Unexpected empty operand list!");
4495  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4496  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4497 
4498  StringRef Tok = Op.getToken();
4499  unsigned NumOperands = Operands.size();
4500 
4501  if (NumOperands == 4 && Tok == "lsl") {
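// e.g. "lsl x0, x1, #3" is rewritten below to "ubfm x0, x1, #61, #60":
// immr = (64 - 3) & 0x3f = 61 and imms = 63 - 3 = 60.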
4502  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4503  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4504  if (Op2.isScalarReg() && Op3.isImm()) {
4505  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4506  if (Op3CE) {
4507  uint64_t Op3Val = Op3CE->getValue();
4508  uint64_t NewOp3Val = 0;
4509  uint64_t NewOp4Val = 0;
4510  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4511  Op2.getReg())) {
4512  NewOp3Val = (32 - Op3Val) & 0x1f;
4513  NewOp4Val = 31 - Op3Val;
4514  } else {
4515  NewOp3Val = (64 - Op3Val) & 0x3f;
4516  NewOp4Val = 63 - Op3Val;
4517  }
4518 
4519  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4520  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4521 
4522  Operands[0] = AArch64Operand::CreateToken(
4523  "ubfm", false, Op.getStartLoc(), getContext());
4524  Operands.push_back(AArch64Operand::CreateImm(
4525  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4526  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4527  Op3.getEndLoc(), getContext());
4528  }
4529  }
4530  } else if (NumOperands == 4 && Tok == "bfc") {
4531  // FIXME: Horrible hack to handle BFC->BFM alias.
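// e.g. "bfc x0, #4, #8" becomes "bfm x0, xzr, #60, #7":
// immr = (64 - lsb) & 0x3f = 60 and imms = width - 1 = 7.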
4532  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4533  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4534  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4535 
4536  if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4537  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4538  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4539 
4540  if (LSBCE && WidthCE) {
4541  uint64_t LSB = LSBCE->getValue();
4542  uint64_t Width = WidthCE->getValue();
4543 
4544  uint64_t RegWidth = 0;
4545  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4546  Op1.getReg()))
4547  RegWidth = 64;
4548  else
4549  RegWidth = 32;
4550 
4551  if (LSB >= RegWidth)
4552  return Error(LSBOp.getStartLoc(),
4553  "expected integer in range [0, 31]");
4554  if (Width < 1 || Width > RegWidth)
4555  return Error(WidthOp.getStartLoc(),
4556  "expected integer in range [1, 32]");
4557 
4558  uint64_t ImmR = 0;
4559  if (RegWidth == 32)
4560  ImmR = (32 - LSB) & 0x1f;
4561  else
4562  ImmR = (64 - LSB) & 0x3f;
4563 
4564  uint64_t ImmS = Width - 1;
4565 
4566  if (ImmR != 0 && ImmS >= ImmR)
4567  return Error(WidthOp.getStartLoc(),
4568  "requested insert overflows register");
4569 
4570  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4571  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4572  Operands[0] = AArch64Operand::CreateToken(
4573  "bfm", false, Op.getStartLoc(), getContext());
4574  Operands[2] = AArch64Operand::CreateReg(
4575  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4576  SMLoc(), SMLoc(), getContext());
4577  Operands[3] = AArch64Operand::CreateImm(
4578  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4579  Operands.emplace_back(
4580  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4581  WidthOp.getEndLoc(), getContext()));
4582  }
4583  }
4584  } else if (NumOperands == 5) {
4585  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4586  // UBFIZ -> UBFM aliases.
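// e.g. "ubfiz x0, x1, #8, #16" becomes "ubfm x0, x1, #56, #15":
// immr = (64 - lsb) & 0x3f = 56 and imms = width - 1 = 15.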
4587  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4588  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4589  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4590  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4591 
4592  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4593  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4594  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4595 
4596  if (Op3CE && Op4CE) {
4597  uint64_t Op3Val = Op3CE->getValue();
4598  uint64_t Op4Val = Op4CE->getValue();
4599 
4600  uint64_t RegWidth = 0;
4601  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4602  Op1.getReg()))
4603  RegWidth = 64;
4604  else
4605  RegWidth = 32;
4606 
4607  if (Op3Val >= RegWidth)
4608  return Error(Op3.getStartLoc(),
4609  "expected integer in range [0, 31]");
4610  if (Op4Val < 1 || Op4Val > RegWidth)
4611  return Error(Op4.getStartLoc(),
4612  "expected integer in range [1, 32]");
4613 
4614  uint64_t NewOp3Val = 0;
4615  if (RegWidth == 32)
4616  NewOp3Val = (32 - Op3Val) & 0x1f;
4617  else
4618  NewOp3Val = (64 - Op3Val) & 0x3f;
4619 
4620  uint64_t NewOp4Val = Op4Val - 1;
4621 
4622  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4623  return Error(Op4.getStartLoc(),
4624  "requested insert overflows register");
4625 
4626  const MCExpr *NewOp3 =
4627  MCConstantExpr::create(NewOp3Val, getContext());
4628  const MCExpr *NewOp4 =
4629  MCConstantExpr::create(NewOp4Val, getContext());
4630  Operands[3] = AArch64Operand::CreateImm(
4631  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4632  Operands[4] = AArch64Operand::CreateImm(
4633  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4634  if (Tok == "bfi")
4635  Operands[0] = AArch64Operand::CreateToken(
4636  "bfm", false, Op.getStartLoc(), getContext());
4637  else if (Tok == "sbfiz")
4638  Operands[0] = AArch64Operand::CreateToken(
4639  "sbfm", false, Op.getStartLoc(), getContext());
4640  else if (Tok == "ubfiz")
4641  Operands[0] = AArch64Operand::CreateToken(
4642  "ubfm", false, Op.getStartLoc(), getContext());
4643  else
4644  llvm_unreachable("No valid mnemonic for alias?");
4645  }
4646  }
4647 
4648  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4649  // UBFX -> UBFM aliases.
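// e.g. "ubfx x0, x1, #8, #16" becomes "ubfm x0, x1, #8, #23":
// the lsb is kept and imms = lsb + width - 1 = 23.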
4650  } else if (NumOperands == 5 &&
4651  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4652  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4653  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4654  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4655 
4656  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4657  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4658  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4659 
4660  if (Op3CE && Op4CE) {
4661  uint64_t Op3Val = Op3CE->getValue();
4662  uint64_t Op4Val = Op4CE->getValue();
4663 
4664  uint64_t RegWidth = 0;
4665  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4666  Op1.getReg()))
4667  RegWidth = 64;
4668  else
4669  RegWidth = 32;
4670 
4671  if (Op3Val >= RegWidth)
4672  return Error(Op3.getStartLoc(),
4673  "expected integer in range [0, 31]");
4674  if (Op4Val < 1 || Op4Val > RegWidth)
4675  return Error(Op4.getStartLoc(),
4676  "expected integer in range [1, 32]");
4677 
4678  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4679 
4680  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4681  return Error(Op4.getStartLoc(),
4682  "requested extract overflows register");
4683 
4684  const MCExpr *NewOp4 =
4685  MCConstantExpr::create(NewOp4Val, getContext());
4686  Operands[4] = AArch64Operand::CreateImm(
4687  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4688  if (Tok == "bfxil")
4689  Operands[0] = AArch64Operand::CreateToken(
4690  "bfm", false, Op.getStartLoc(), getContext());
4691  else if (Tok == "sbfx")
4692  Operands[0] = AArch64Operand::CreateToken(
4693  "sbfm", false, Op.getStartLoc(), getContext());
4694  else if (Tok == "ubfx")
4695  Operands[0] = AArch64Operand::CreateToken(
4696  "ubfm", false, Op.getStartLoc(), getContext());
4697  else
4698  llvm_unreachable("No valid mnemonic for alias?");
4699  }
4700  }
4701  }
4702  }
4703 
4704  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4705  // instruction for FP registers correctly in some rare circumstances. Convert
4706  // it to a safe instruction and warn (because silently changing someone's
4707  // assembly is rude).
4708  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4709  NumOperands == 4 && Tok == "movi") {
4710  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4711  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4712  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4713  if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4714  (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4715  StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4716  if (Suffix.lower() == ".2d" &&
4717  cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4718  Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4719  " correctly on this CPU, converting to equivalent movi.16b");
4720  // Switch the suffix to .16b.
4721  unsigned Idx = Op1.isToken() ? 1 : 2;
4722  Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4723  getContext());
4724  }
4725  }
4726  }
4727 
4728  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4729  // InstAlias can't quite handle this since the reg classes aren't
4730  // subclasses.
4731  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4732  // The source register can be Wn here, but the matcher expects a
4733  // GPR64. Twiddle it here if necessary.
4734  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4735  if (Op.isScalarReg()) {
4736  unsigned Reg = getXRegFromWReg(Op.getReg());
4737  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4738  Op.getStartLoc(), Op.getEndLoc(),
4739  getContext());
4740  }
4741  }
4742  // FIXME: Likewise for sxt[bh] with a Xd dst operand
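  // (e.g. "sxtb x0, w1": the w1 source is promoted to x1 below.)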
4743  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4744  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4745  if (Op.isScalarReg() &&
4746  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4747  Op.getReg())) {
4748  // The source register can be Wn here, but the matcher expects a
4749  // GPR64. Twiddle it here if necessary.
4750  AArch64Operand &SrcOp = static_cast<AArch64Operand &>(*Operands[2]);
4751  if (SrcOp.isScalarReg()) {
4752  unsigned Reg = getXRegFromWReg(SrcOp.getReg());
4753  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4754  SrcOp.getStartLoc(),
4755  SrcOp.getEndLoc(), getContext());
4756  }
4757  }
4758  }
4759  // FIXME: Likewise for uxt[bh] with a Xd dst operand
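  // (e.g. "uxtb x0, w1": the x0 destination is narrowed to w0 below.)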
4760  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4761  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4762  if (Op.isScalarReg() &&
4763  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4764  Op.getReg())) {
4765  // The destination register can be Xn here, but the matcher expects a
4766  // GPR32, so rewrite it to the corresponding Wn register.
4767  unsigned Reg = getWRegFromXReg(Op.getReg());
4768  Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4769  Op.getStartLoc(),
4770  Op.getEndLoc(), getContext());
4774  }
4775  }
4776 
4777  MCInst Inst;
4778  FeatureBitset MissingFeatures;
4779  // First try to match against the secondary set of tables containing the
4780  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4781  unsigned MatchResult =
4782  MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4783  MatchingInlineAsm, 1);
4784 
4785  // If that fails, try against the alternate table containing long-form NEON:
4786  // "fadd v0.2s, v1.2s, v2.2s"
4787  if (MatchResult != Match_Success) {
4788  // But first, save the short-form match result: we can use it in case the
4789  // long-form match also fails.
4790  auto ShortFormNEONErrorInfo = ErrorInfo;
4791  auto ShortFormNEONMatchResult = MatchResult;
4792  auto ShortFormNEONMissingFeatures = MissingFeatures;
4793 
4794  MatchResult =
4795  MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4796  MatchingInlineAsm, 0);
4797 
4798  // If the long-form match also failed, and it failed on the mnemonic
4799  // suffix token operand, the short-form match failure is probably more
4800  // relevant: use it instead.
4801  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4802  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4803  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4804  MatchResult = ShortFormNEONMatchResult;
4805  ErrorInfo = ShortFormNEONErrorInfo;
4806  MissingFeatures = ShortFormNEONMissingFeatures;
4807  }
4808  }
4809 
4810  switch (MatchResult) {
4811  case Match_Success: {
4812  // Perform range checking and other semantic validations
4813  SmallVector<SMLoc, 8> OperandLocs;
4814  NumOperands = Operands.size();
4815  for (unsigned i = 1; i < NumOperands; ++i)
4816  OperandLocs.push_back(Operands[i]->getStartLoc());
4817  if (validateInstruction(Inst, IDLoc, OperandLocs))
4818  return true;
4819 
4820  Inst.setLoc(IDLoc);
4821  Out.EmitInstruction(Inst, getSTI());
4822  return false;
4823  }
4824  case Match_MissingFeature: {
4825  assert(MissingFeatures.any() && "Unknown missing feature!");
4826  // Special case the error message for the very common case where only
4827  // a single subtarget feature is missing (e.g. NEON).
4828  std::string Msg = "instruction requires:";
4829  for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4830  if (MissingFeatures[i]) {
4831  Msg += " ";
4832  Msg += getSubtargetFeatureName(i);
4833  }
4834  }
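  // The resulting diagnostic reads e.g. "instruction requires: neon"; the
  // feature names come from getSubtargetFeatureName.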
4835  return Error(IDLoc, Msg);
4836  }
4837  case Match_MnemonicFail:
4838  return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4839  case Match_InvalidOperand: {
4840  SMLoc ErrorLoc = IDLoc;
4841 
4842  if (ErrorInfo != ~0ULL) {
4843  if (ErrorInfo >= Operands.size())
4844  return Error(IDLoc, "too few operands for instruction",
4845  SMRange(IDLoc, getTok().getLoc()));
4846 
4847  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4848  if (ErrorLoc == SMLoc())
4849  ErrorLoc = IDLoc;
4850  }
4851  // If the match failed on a suffix token operand, tweak the diagnostic
4852  // accordingly.
4853  if (ErrorInfo != ~0ULL && ((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4854  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4855  MatchResult = Match_InvalidSuffix;
4856 
4857  return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4858  }
4859  case Match_InvalidTiedOperand:
4860  case Match_InvalidMemoryIndexed1:
4861  case Match_InvalidMemoryIndexed2:
4862  case Match_InvalidMemoryIndexed4:
4863  case Match_InvalidMemoryIndexed8:
4864  case Match_InvalidMemoryIndexed16:
4865  case Match_InvalidCondCode:
4866  case Match_AddSubRegExtendSmall:
4867  case Match_AddSubRegExtendLarge:
4868  case Match_AddSubSecondSource:
4869  case Match_LogicalSecondSource:
4870  case Match_AddSubRegShift32:
4871  case Match_AddSubRegShift64:
4872  case Match_InvalidMovImm32Shift:
4873  case Match_InvalidMovImm64Shift:
4874  case Match_InvalidFPImm:
4875  case Match_InvalidMemoryWExtend8:
4876  case Match_InvalidMemoryWExtend16:
4877  case Match_InvalidMemoryWExtend32:
4878  case Match_InvalidMemoryWExtend64:
4879  case Match_InvalidMemoryWExtend128:
4880  case Match_InvalidMemoryXExtend8:
4881  case Match_InvalidMemoryXExtend16:
4882  case Match_InvalidMemoryXExtend32:
4883  case Match_InvalidMemoryXExtend64:
4884  case Match_InvalidMemoryXExtend128:
4885  case Match_InvalidMemoryIndexed1SImm4:
4886  case Match_InvalidMemoryIndexed2SImm4:
4887  case Match_InvalidMemoryIndexed3SImm4:
4888  case Match_InvalidMemoryIndexed4SImm4:
4889  case Match_InvalidMemoryIndexed1SImm6:
4890  case Match_InvalidMemoryIndexed16SImm4:
4891  case Match_InvalidMemoryIndexed4SImm7:
4892  case Match_InvalidMemoryIndexed8SImm7:
4893  case Match_InvalidMemoryIndexed16SImm7:
4894  case Match_InvalidMemoryIndexed8UImm5:
4895  case Match_InvalidMemoryIndexed4UImm5:
4896  case Match_InvalidMemoryIndexed2UImm5:
4897  case Match_InvalidMemoryIndexed1UImm6:
4898  case Match_InvalidMemoryIndexed2UImm6:
4899  case Match_InvalidMemoryIndexed4UImm6:
4900  case Match_InvalidMemoryIndexed8UImm6:
4901  case Match_InvalidMemoryIndexed16UImm6:
4902  case Match_InvalidMemoryIndexedSImm6:
4903  case Match_InvalidMemoryIndexedSImm5:
4904  case Match_InvalidMemoryIndexedSImm8:
4905  case Match_InvalidMemoryIndexedSImm9:
4906  case Match_InvalidMemoryIndexed16SImm9:
4907  case Match_InvalidMemoryIndexed8SImm10:
4908  case Match_InvalidImm0_1:
4909  case Match_InvalidImm0_7:
4910  case Match_InvalidImm0_15:
4911  case Match_InvalidImm0_31:
4912  case Match_InvalidImm0_63:
4913  case Match_InvalidImm0_127:
4914  case Match_InvalidImm0_255:
4915  case Match_InvalidImm0_65535:
4916  case Match_InvalidImm1_8:
4917  case Match_InvalidImm1_16:
4918  case Match_InvalidImm1_32:
4919  case Match_InvalidImm1_64:
4920  case Match_InvalidSVEAddSubImm8:
4921  case Match_InvalidSVEAddSubImm16:
4922  case Match_InvalidSVEAddSubImm32:
4923  case Match_InvalidSVEAddSubImm64:
4924  case Match_InvalidSVECpyImm8:
4925  case Match_InvalidSVECpyImm16:
4926  case Match_InvalidSVECpyImm32:
4927  case Match_InvalidSVECpyImm64:
4928  case Match_InvalidIndexRange1_1:
4929  case Match_InvalidIndexRange0_15:
4930  case Match_InvalidIndexRange0_7:
4931  case Match_InvalidIndexRange0_3:
4932  case Match_InvalidIndexRange0_1:
4933  case Match_InvalidSVEIndexRange0_63:
4934  case Match_InvalidSVEIndexRange0_31:
4935  case Match_InvalidSVEIndexRange0_15:
4936  case Match_InvalidSVEIndexRange0_7:
4937  case Match_InvalidSVEIndexRange0_3:
4938  case Match_InvalidLabel:
4939  case Match_InvalidComplexRotationEven:
4940  case Match_InvalidComplexRotationOdd:
4941  case Match_InvalidGPR64shifted8:
4942  case Match_InvalidGPR64shifted16:
4943  case Match_InvalidGPR64shifted32:
4944  case Match_InvalidGPR64shifted64:
4945  case Match_InvalidGPR64NoXZRshifted8:
4946  case Match_InvalidGPR64NoXZRshifted16:
4947  case Match_InvalidGPR64NoXZRshifted32:
4948  case Match_InvalidGPR64NoXZRshifted64:
4949  case Match_InvalidZPR32UXTW8:
4950  case Match_InvalidZPR32UXTW16:
4951  case Match_InvalidZPR32UXTW32:
4952  case Match_InvalidZPR32UXTW64:
4953  case Match_InvalidZPR32SXTW8:
4954  case Match_InvalidZPR32SXTW16:
4955  case Match_InvalidZPR32SXTW32:
4956  case Match_InvalidZPR32SXTW64:
4957  case Match_InvalidZPR64UXTW8:
4958  case Match_InvalidZPR64SXTW8:
4959  case Match_InvalidZPR64UXTW16:
4960  case Match_InvalidZPR64SXTW16:
4961  case Match_InvalidZPR64UXTW32:
4962  case Match_InvalidZPR64SXTW32:
4963  case Match_InvalidZPR64UXTW64:
4964  case Match_InvalidZPR64SXTW64:
4965  case Match_InvalidZPR32LSL8:
4966  case Match_InvalidZPR32LSL16:
4967  case Match_InvalidZPR32LSL32:
4968  case Match_InvalidZPR32LSL64:
4969  case Match_InvalidZPR64LSL8:
4970  case Match_InvalidZPR64LSL16:
4971  case Match_InvalidZPR64LSL32:
4972  case Match_InvalidZPR64LSL64:
4973  case Match_InvalidZPR0:
4974  case Match_InvalidZPR8:
4975  case Match_InvalidZPR16:
4976  case Match_InvalidZPR32:
4977  case Match_InvalidZPR64:
4978  case Match_InvalidZPR128:
4979  case Match_InvalidZPR_3b8:
4980  case Match_InvalidZPR_3b16:
4981  case Match_InvalidZPR_3b32:
4982  case Match_InvalidZPR_4b16:
4983  case Match_InvalidZPR_4b32:
4984  case Match_InvalidZPR_4b64:
4985  case Match_InvalidSVEPredicateAnyReg:
4986  case Match_InvalidSVEPattern:
4987  case Match_InvalidSVEPredicateBReg:
4988  case Match_InvalidSVEPredicateHReg:
4989  case Match_InvalidSVEPredicateSReg:
4990  case Match_InvalidSVEPredicateDReg:
4991  case Match_InvalidSVEPredicate3bAnyReg:
4992  case Match_InvalidSVEPredicate3bBReg:
4993  case Match_InvalidSVEPredicate3bHReg:
4994  case Match_InvalidSVEPredicate3bSReg:
4995  case Match_InvalidSVEPredicate3bDReg:
4996  case Match_InvalidSVEExactFPImmOperandHalfOne:
4997  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4998  case Match_InvalidSVEExactFPImmOperandZeroOne:
4999  case Match_MSR:
5000  case Match_MRS: {
5001  if (ErrorInfo >= Operands.size())
5002  return Error(IDLoc, "too few operands for instruction",
  SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5003  // Any time we get here, there's nothing fancy to do. Just get the
5004  // operand SMLoc and display the diagnostic.
5005  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5006  if (ErrorLoc == SMLoc())
5007  ErrorLoc = IDLoc;
5008  return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5009  }
5010  }
5011 
5012  llvm_unreachable("Implement any new match types added!");
5013 }
5014 
5015 /// ParseDirective parses the AArch64-specific directives.
5016 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5017  const MCObjectFileInfo::Environment Format =
5018  getContext().getObjectFileInfo()->getObjectFileType();
5019  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5020 
5021  StringRef IDVal = DirectiveID.getIdentifier();
5022  SMLoc Loc = DirectiveID.getLoc();
5023  if (IDVal == ".arch")
5024  parseDirectiveArch(Loc);
5025  else if (IDVal == ".cpu")
5026  parseDirectiveCPU(Loc);
5027  else if (IDVal == ".tlsdesccall")
5028  parseDirectiveTLSDescCall(Loc);
5029  else if (IDVal == ".ltorg" || IDVal == ".pool")
5030  parseDirectiveLtorg(Loc);
5031  else if (IDVal == ".unreq")
5032  parseDirectiveUnreq(Loc);
5033  else if (IDVal == ".inst")
5034  parseDirectiveInst(Loc);
5035  else if (IDVal == ".cfi_negate_ra_state")
5036  parseDirectiveCFINegateRAState();
5037  else if (IDVal == ".cfi_b_key_frame")
5038  parseDirectiveCFIBKeyFrame();
5039  else if (IDVal == ".arch_extension")
5040  parseDirectiveArchExtension(Loc);
5041  else if (IsMachO) {
5042  if (IDVal == MCLOHDirectiveName())
5043  parseDirectiveLOH(IDVal, Loc);
5044  else
5045  return true;
5046  } else
5047  return true;
5048  return false;
5049 }
5050 
5051 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5052  SmallVector<StringRef, 4> &RequestedExtensions) {
5053  const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
5056  const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
5059 
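  // For example, "+crypto" on armv8.4-a expands below to sm4, sha3, sha2
  // and aes, while on armv8.1-a it expands to sha2 and aes only.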
5060  if (!NoCrypto && Crypto) {
5061  switch (ArchKind) {
5062  default:
5063  // Map 'generic' (and others) to sha2 and aes, because
5064  // that was the traditional meaning of crypto.
5065  case AArch64::ArchKind::ARMV8_1A:
5066  case AArch64::ArchKind::ARMV8_2A:
5067  case AArch64::ArchKind::ARMV8_3A:
5068  RequestedExtensions.push_back("sha2");
5069  RequestedExtensions.push_back("aes");
5070  break;
5071  case AArch64::ArchKind::ARMV8_4A:
5072  case AArch64::ArchKind::ARMV8_5A:
5073  RequestedExtensions.push_back("sm4");
5074  RequestedExtensions.push_back("sha3");
5075  RequestedExtensions.push_back("sha2");
5076  RequestedExtensions.push_back("aes");
5077  break;
5078  }
5079  } else if (NoCrypto) {
5080  switch (ArchKind) {
5081  default:
5082  // Map 'generic' (and others) to nosha2 and noaes, since sha2 and aes
5083  // were the traditional meaning of crypto.
5084  case AArch64::ArchKind::ARMV8_1A:
5085  case AArch64::ArchKind::ARMV8_2A:
5086  case AArch64::ArchKind::ARMV8_3A:
5087  RequestedExtensions.push_back("nosha2");
5088  RequestedExtensions.push_back("noaes");
5089  break;
5090  case AArch64::ArchKind::ARMV8_4A:
5091  case AArch64::ArchKind::ARMV8_5A:
5092  RequestedExtensions.push_back("nosm4");
5093  RequestedExtensions.push_back("nosha3");
5094  RequestedExtensions.push_back("nosha2");
5095  RequestedExtensions.push_back("noaes");
5096  break;
5097  }
5098  }
5099 }
5100 
5101 /// parseDirectiveArch
5102 /// ::= .arch token
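/// e.g. ".arch armv8.2-a+crypto+nofp" selects the v8.2-A feature set, then
/// applies the listed extensions (expanded by ExpandCryptoAEK above).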
5103 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5104  SMLoc ArchLoc = getLoc();
5105 
5106  StringRef Arch, ExtensionString;
5107  std::tie(Arch, ExtensionString) =
5108  getParser().parseStringToEndOfStatement().trim().split('+');
5109 
5110  AArch64::ArchKind ID = AArch64::parseArch(Arch);
5111  if (ID == AArch64::ArchKind::INVALID)
5112  return Error(ArchLoc, "unknown arch name");
5113 
5114  if (parseToken(AsmToken::EndOfStatement))
5115  return true;
5116 
5117  // Get the architecture and extension features.
5118  std::vector<StringRef> AArch64Features;
5119  AArch64::getArchFeatures(ID, AArch64Features);
5120  AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5121  AArch64Features);
5122 
5123  MCSubtargetInfo &STI = copySTI();
5124  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5125  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5126 
5127  SmallVector<StringRef, 4> RequestedExtensions;
5128  if (!ExtensionString.empty())
5129  ExtensionString.split(RequestedExtensions, '+');
5130 
5131  ExpandCryptoAEK(ID, RequestedExtensions);
5132 
5133  FeatureBitset Features = STI.getFeatureBits();
5134  for (auto Name : RequestedExtensions) {
5135  bool EnableFeature = true;
5136 
5137  if (Name.startswith_lower("no")) {
5138  EnableFeature = false;
5139  Name = Name.substr(2);
5140  }
5141 
5142  for (const auto &Extension : ExtensionMap) {
5143  if (Extension.Name != Name)
5144  continue;
5145 
5146  if (Extension.Features.none())
5147  report_fatal_error("unsupported architectural extension: " + Name);
5148 
5149  FeatureBitset ToggleFeatures = EnableFeature
5150  ? (~Features & Extension.Features)
5151  : ( Features & Extension.Features);
5152  FeatureBitset AvailableFeatures =
5153  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5154  setAvailableFeatures(AvailableFeatures);
5155  break;
5156  }
5157  }
5158  return false;
5159 }
5160 
5161 /// parseDirectiveArchExtension
5162 /// ::= .arch_extension [no]feature
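/// e.g. ".arch_extension crc" enables the CRC feature and
/// ".arch_extension nocrc" disables it again.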
5163 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5164  SMLoc ExtLoc = getLoc();
5165 
5166  StringRef Name = getParser().parseStringToEndOfStatement().trim();
5167 
5168  if (parseToken(AsmToken::EndOfStatement,
5169  "unexpected token in '.arch_extension' directive"))
5170  return true;
5171 
5172  bool EnableFeature = true;
5173  if (Name.startswith_lower("no")) {
5174  EnableFeature = false;
5175  Name = Name.substr(2);
5176  }
5177 
5178  MCSubtargetInfo &STI = copySTI();
5179  FeatureBitset Features = STI.getFeatureBits();
5180  for (const auto &Extension : ExtensionMap) {
5181  if (Extension.Name != Name)
5182  continue;
5183 
5184  if (Extension.Features.none())
5185  return Error(ExtLoc, "unsupported architectural extension: " + Name);
5186 
5187  FeatureBitset ToggleFeatures = EnableFeature
5188  ? (~Features & Extension.Features)
5189  : (Features & Extension.Features);
5190  FeatureBitset AvailableFeatures =
5191  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5192  setAvailableFeatures(AvailableFeatures);
5193  return false;
5194  }
5195 
5196  return Error(ExtLoc, "unknown architectural extension: " + Name);
5197 }
5198 
5199 static SMLoc incrementLoc(SMLoc L, int Offset) {
5200  return SMLoc::getFromPointer(L.getPointer() + Offset);
5201 }
5202 
5203 /// parseDirectiveCPU
5204 /// ::= .cpu id
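/// e.g. ".cpu cortex-a57+crypto"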
5205 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5206  SMLoc CurLoc = getLoc();
5207 
5208  StringRef CPU, ExtensionString;
5209  std::tie(CPU, ExtensionString) =
5210  getParser().parseStringToEndOfStatement().trim().split('+');
5211 
5212  if (parseToken(AsmToken::EndOfStatement))
5213  return true;
5214 
5215  SmallVector<StringRef, 4> RequestedExtensions;
5216  if (!ExtensionString.empty())
5217  ExtensionString.split(RequestedExtensions, '+');
5218 
5219  // FIXME: This is using tablegen data, but should be moved to
5220  // ARMTargetParser once that is tablegen'ed.
5221  if (!getSTI().isCPUStringValid(CPU)) {
5222  Error(CurLoc, "unknown CPU name");
5223  return false;
5224  }
5225 
5226  MCSubtargetInfo &STI = copySTI();
5227  STI.setDefaultFeatures(CPU, "");
5228  CurLoc = incrementLoc(CurLoc, CPU.size());
5229 
5230  ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5231 
5232  FeatureBitset Features = STI.getFeatureBits();
5233  for (auto Name : RequestedExtensions) {
5234  // Advance source location past '+'.
5235  CurLoc = incrementLoc(CurLoc, 1);
5236 
5237  bool EnableFeature = true;
5238 
5239  if (Name.startswith_lower("no")) {
5240  EnableFeature = false;
5241  Name = Name.substr(2);
5242  }
5243 
5244  bool FoundExtension = false;
5245  for (const auto &Extension : ExtensionMap) {
5246  if (Extension.Name != Name)
5247  continue;
5248 
5249  if (Extension.Features.none())
5250  report_fatal_error("unsupported architectural extension: " + Name);
5251 
5252  FeatureBitset ToggleFeatures = EnableFeature
5253  ? (~Features & Extension.Features)
5254  : ( Features & Extension.Features);
5255  FeatureBitset AvailableFeatures =
5256  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5257  setAvailableFeatures(AvailableFeatures);
5258  FoundExtension = true;
5259 
5260  break;
5261  }
5262 
5263  if (!FoundExtension)
5264  Error(CurLoc, "unsupported architectural extension");
5265 
5266  CurLoc = incrementLoc(CurLoc, Name.size());
5267  }
5268  return false;
5269 }
5270 
5271 /// parseDirectiveInst
5272 /// ::= .inst opcode [, ...]
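/// e.g. ".inst 0xd503201f" emits the raw 32-bit word for a NOP.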
5273 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5274  if (getLexer().is(AsmToken::EndOfStatement))
5275  return Error(Loc, "expected expression following '.inst' directive");
5276 
5277  auto parseOp = [&]() -> bool {
5278  SMLoc L = getLoc();
5279  const MCExpr *Expr = nullptr;
5280  if (check(getParser().parseExpression(Expr), L, "expected expression"))
5281  return true;
5282  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5283  if (check(!Value, L, "expected constant expression"))
5284  return true;
5285  getTargetStreamer().emitInst(Value->getValue());
5286  return false;
5287  };
5288 
5289  if (parseMany(parseOp))
5290  return addErrorSuffix(" in '.inst' directive");
5291  return false;
5292 }
5293 
5294 // parseDirectiveTLSDescCall:
5295 // ::= .tlsdesccall symbol
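// e.g. ".tlsdesccall var" marks the following "blr" of a TLS descriptor
// sequence; the pseudo-instruction emits no bytes, only the marker
// relocation on the call.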
5296 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5297  StringRef Name;
5298  if (check(getParser().parseIdentifier(Name), L,
5299  "expected symbol after directive") ||
5300  parseToken(AsmToken::EndOfStatement))
5301  return true;
5302 
5303  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5304  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5305  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5306 
5307  MCInst Inst;
5308  Inst.setOpcode(AArch64::TLSDESCCALL);
5309  Inst.addOperand(MCOperand::createExpr(Expr));
5310 
5311  getParser().getStreamer().EmitInstruction(Inst, getSTI());
5312  return false;
5313 }
5314 
5315 /// ::= .loh <lohName | lohId> label1, ..., labelN
5316 /// The number of arguments depends on the loh identifier.
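/// e.g. ".loh AdrpAdrp Lbl1, Lbl2" (that kind takes exactly two labels).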
5317 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5318  MCLOHType Kind;
5319  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5320  if (getParser().getTok().isNot(AsmToken::Integer))
5321  return TokError("expected an identifier or a number in directive");
5322  // We successfully parsed a numeric value for the identifier.
5323  // Check if it is valid.
5324  int64_t Id = getParser().getTok().getIntVal();
5325  if (Id < 0 || Id > UINT32_MAX || !isValidMCLOHType(Id))
5326  return TokError("invalid numeric identifier in directive");
5327  Kind = (MCLOHType)Id;
5328  } else {
5329  StringRef Name = getTok().getIdentifier();
5330  // We successfully parsed an identifier.
5331  // Check if it is a recognized one.
5332  int Id = MCLOHNameToId(Name);
5333 
5334  if (Id == -1)
5335  return TokError("invalid identifier in directive");
5336  Kind = (MCLOHType)Id;
5337  }
5338  // Consume the identifier.
5339  Lex();
5340  // Get the number of arguments of this LOH.
5341  int NbArgs = MCLOHIdToNbArgs(Kind);
5342 
5343  assert(NbArgs != -1 && "Invalid number of arguments");
5344 
5345  SmallVector<MCSymbol *, 3> Args;
5346  for (int Idx = 0; Idx < NbArgs; ++Idx) {
5347  StringRef Name;
5348  if (getParser().parseIdentifier(Name))
5349  return TokError("expected identifier in directive");
5350  Args.push_back(getContext().getOrCreateSymbol(Name));
5351 
5352  if (Idx + 1 == NbArgs)
5353  break;
5354  if (parseToken(AsmToken::Comma,
5355  "unexpected token in '" + Twine(IDVal) + "' directive"))
5356  return true;
5357  }
5358  if (parseToken(AsmToken::EndOfStatement,
5359  "unexpected token in '" + Twine(IDVal) + "' directive"))
5360  return true;
5361 
5362  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5363  return false;
5364 }
5365 
5366 /// parseDirectiveLtorg
5367 /// ::= .ltorg | .pool
5368 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5369  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5370  return true;
5371  getTargetStreamer().emitCurrentConstantPool();
5372  return false;
5373 }
5374 
5375 /// parseDirectiveReq
5376 /// ::= name .req registername
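/// e.g. "fp .req x29" lets later instructions refer to x29 as "fp".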
5377 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5378  MCAsmParser &Parser = getParser();
5379  Parser.Lex(); // Eat the '.req' token.
5380  SMLoc SRegLoc = getLoc();
5381  RegKind RegisterKind = RegKind::Scalar;
5382  unsigned RegNum;
5383  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5384 