//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "AArch64InstrInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector
};

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases created via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           OperandVector &Operands);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template<bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = Options.getABIName() == "ilp32";
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.xword to the target-independent .2byte/.4byte/.8byte
    // directives as they have the same form and semantics:
    ///  ::= (.hword | .word | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
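    //
    // (Illustrative example: in "ldr x0, [x1, w2, sxtw #3]" the "w2, sxtw #3"
    // part is parsed as a single register operand whose ShiftExtend records
    // the SXTW extend with an explicit amount of 3.)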
    ShiftExtendOp ShiftExtend;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // Describes whether the parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct ExtendOp {
    unsigned Val;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around, as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }

  template <int Bits, int Scale>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    int64_t Val = MCE->getValue();
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented
  // as a logical immediate can always be represented when inverted.
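  // (Illustrative example: 0x00ff00ff00ff00ff is encodable as a logical
  // immediate, and so is its bitwise inverse 0xff00ff00ff00ff00.)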
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    int64_t SVal = typename std::make_signed<T>::type(Val);
    int64_t UVal = typename std::make_unsigned<T>::type(Val);
    if (Val != SVal && Val != UVal)
      return false;

    return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
  }

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate
  /// is a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
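  /// (Illustrative example: with Width = 12, the constant 0x3000 is returned
  /// as the pair (0x3, 12), while the constant 0x3 is returned as (0x3, 0).)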
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
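    // (Illustrative example: "add x0, x1, #1, lsl #12" uses the shifted form,
    // while "add x0, x1, #1" uses the implicit 'lsl #0'.)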
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
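  // (Illustrative examples: "mov z0.b, #255" is accepted because a byte copy
  // may be treated as unsigned, and "mov z0.h, #-32768" is accepted as
  // #-128, lsl #8.)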
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) &&
            Val <= (((1 << (N - 1)) - 1) << 2));
  }

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])
        return true;
    }

    return false;
  }

  bool isMovZSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovZSymbolG2() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
                         AArch64MCExpr::VK_TPREL_G2,
                         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovZSymbolG1() const {
    return isMovWSymbol({
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    });
  }

  bool isMovZSymbolG0() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
                         AArch64MCExpr::VK_TPREL_G0,
                         AArch64MCExpr::VK_DTPREL_G0});
  }

  bool isMovKSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovKSymbolG2() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
  }

  bool isMovKSymbolG1() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
                         AArch64MCExpr::VK_TPREL_G1_NC,
                         AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovKSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
  }

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() &&
        (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() &&
        (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
          APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NoMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return Res;
    if ((Res = isExactFPImm<ImmB>()))
      return Res;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
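  // (Illustrative example: with Width = 64, "ldr x0, [x1, #-8]" is not a
  // legal scaled LDR offset and falls back to LDUR, while "ldr x0, [x1, #8]"
  // is a legal scaled offset and must not match the fallback operand.)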
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           "NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
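    // (Illustrative example: a resolved branch offset of 8 bytes is encoded
    // as the immediate 2, since the low two bits are implicitly zero.)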
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }

  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }

  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst,
                                               unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }

  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }

  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }

  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
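  // (Illustrative example: "ldrb w0, [x1, x2, lsl #0]" matches the DoShift
  // variant because the amount is explicit, while "ldrb w0, [x1, x2]"
  // matches NoShift, even though both shift by zero.)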
1741  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1742  assert(N == 2 && "Invalid number of operands!");
1743  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1744  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1745  Inst.addOperand(MCOperand::createImm(IsSigned));
1746  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1747  }
1748 
1749  template<int Shift>
1750  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1751  assert(N == 1 && "Invalid number of operands!");
1752 
1753  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1754  uint64_t Value = CE->getValue();
1755  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1756  }
1757 
1758  template<int Shift>
1759  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1760  assert(N == 1 && "Invalid number of operands!");
1761 
1762  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1763  uint64_t Value = CE->getValue();
1764  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1765  }
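 // Worked example of the alias math above (values chosen for illustration):
 // with Shift=16, "mov x0, #0x12340000" feeds MOVZ the chunk
 // (0x12340000 >> 16) & 0xffff == 0x1234; with Shift=0, "mov x0, #-2" feeds
 // MOVN (~Value >> 0) & 0xffff == 0x1, i.e. "movn x0, #1".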
1766 
1767  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1768  assert(N == 1 && "Invalid number of operands!");
1769  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1770  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1771  }
1772 
1773  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1774  assert(N == 1 && "Invalid number of operands!");
1775  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1776  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1777  }
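 // For illustration: FCMLA-style even rotations {0, 90, 180, 270} divide down
 // to the encodings {0, 1, 2, 3}, and FCADD-style odd rotations {90, 270} map
 // through (Value - 90) / 180 to {0, 1}.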
1778 
1779  void print(raw_ostream &OS) const override;
1780 
1781  static std::unique_ptr<AArch64Operand>
1782  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1783  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1784  Op->Tok.Data = Str.data();
1785  Op->Tok.Length = Str.size();
1786  Op->Tok.IsSuffix = IsSuffix;
1787  Op->StartLoc = S;
1788  Op->EndLoc = S;
1789  return Op;
1790  }
1791 
1792  static std::unique_ptr<AArch64Operand>
1793  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1794  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1795  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1796  unsigned ShiftAmount = 0,
1797  unsigned HasExplicitAmount = false) {
1798  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1799  Op->Reg.RegNum = RegNum;
1800  Op->Reg.Kind = Kind;
1801  Op->Reg.ElementWidth = 0;
1802  Op->Reg.EqualityTy = EqTy;
1803  Op->Reg.ShiftExtend.Type = ExtTy;
1804  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1805  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1806  Op->StartLoc = S;
1807  Op->EndLoc = E;
1808  return Op;
1809  }
1810 
1811  static std::unique_ptr<AArch64Operand>
1812  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1813  SMLoc S, SMLoc E, MCContext &Ctx,
1814  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1815  unsigned ShiftAmount = 0,
1816  unsigned HasExplicitAmount = false) {
1817  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1818  Kind == RegKind::SVEPredicateVector) &&
1819  "Invalid vector kind");
1820  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1821  HasExplicitAmount);
1822  Op->Reg.ElementWidth = ElementWidth;
1823  return Op;
1824  }
1825 
1826  static std::unique_ptr<AArch64Operand>
1827  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1828  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1829  MCContext &Ctx) {
1830  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1831  Op->VectorList.RegNum = RegNum;
1832  Op->VectorList.Count = Count;
1833  Op->VectorList.NumElements = NumElements;
1834  Op->VectorList.ElementWidth = ElementWidth;
1835  Op->VectorList.RegisterKind = RegisterKind;
1836  Op->StartLoc = S;
1837  Op->EndLoc = E;
1838  return Op;
1839  }
1840 
1841  static std::unique_ptr<AArch64Operand>
1842  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1843  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1844  Op->VectorIndex.Val = Idx;
1845  Op->StartLoc = S;
1846  Op->EndLoc = E;
1847  return Op;
1848  }
1849 
1850  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1851  SMLoc E, MCContext &Ctx) {
1852  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1853  Op->Imm.Val = Val;
1854  Op->StartLoc = S;
1855  Op->EndLoc = E;
1856  return Op;
1857  }
1858 
1859  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1860  unsigned ShiftAmount,
1861  SMLoc S, SMLoc E,
1862  MCContext &Ctx) {
1863  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1864  Op->ShiftedImm.Val = Val;
1865  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1866  Op->StartLoc = S;
1867  Op->EndLoc = E;
1868  return Op;
1869  }
1870 
1871  static std::unique_ptr<AArch64Operand>
1872  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1873  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1874  Op->CondCode.Code = Code;
1875  Op->StartLoc = S;
1876  Op->EndLoc = E;
1877  return Op;
1878  }
1879 
1880  static std::unique_ptr<AArch64Operand>
1881  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1882  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1883  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1884  Op->FPImm.IsExact = IsExact;
1885  Op->StartLoc = S;
1886  Op->EndLoc = S;
1887  return Op;
1888  }
1889 
1890  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1891  StringRef Str,
1892  SMLoc S,
1893  MCContext &Ctx) {
1894  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1895  Op->Barrier.Val = Val;
1896  Op->Barrier.Data = Str.data();
1897  Op->Barrier.Length = Str.size();
1898  Op->StartLoc = S;
1899  Op->EndLoc = S;
1900  return Op;
1901  }
1902 
1903  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1904  uint32_t MRSReg,
1905  uint32_t MSRReg,
1906  uint32_t PStateField,
1907  MCContext &Ctx) {
1908  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1909  Op->SysReg.Data = Str.data();
1910  Op->SysReg.Length = Str.size();
1911  Op->SysReg.MRSReg = MRSReg;
1912  Op->SysReg.MSRReg = MSRReg;
1913  Op->SysReg.PStateField = PStateField;
1914  Op->StartLoc = S;
1915  Op->EndLoc = S;
1916  return Op;
1917  }
1918 
1919  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1920  SMLoc E, MCContext &Ctx) {
1921  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1922  Op->SysCRImm.Val = Val;
1923  Op->StartLoc = S;
1924  Op->EndLoc = E;
1925  return Op;
1926  }
1927 
1928  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1929  StringRef Str,
1930  SMLoc S,
1931  MCContext &Ctx) {
1932  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1933  Op->Prefetch.Val = Val;
1934  Op->Barrier.Data = Str.data();
1935  Op->Barrier.Length = Str.size();
1936  Op->StartLoc = S;
1937  Op->EndLoc = S;
1938  return Op;
1939  }
1940 
1941  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1942  StringRef Str,
1943  SMLoc S,
1944  MCContext &Ctx) {
1945  auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1946  Op->PSBHint.Val = Val;
1947  Op->PSBHint.Data = Str.data();
1948  Op->PSBHint.Length = Str.size();
1949  Op->StartLoc = S;
1950  Op->EndLoc = S;
1951  return Op;
1952  }
1953 
1954  static std::unique_ptr<AArch64Operand>
1955  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1956  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1957  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1958  Op->ShiftExtend.Type = ShOp;
1959  Op->ShiftExtend.Amount = Val;
1960  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1961  Op->StartLoc = S;
1962  Op->EndLoc = E;
1963  return Op;
1964  }
1965 };
1966 
1967 } // end anonymous namespace.
1968 
1969 void AArch64Operand::print(raw_ostream &OS) const {
1970  switch (Kind) {
1971  case k_FPImm:
1972  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
1973  if (!getFPImmIsExact())
1974  OS << " (inexact)";
1975  OS << ">";
1976  break;
1977  case k_Barrier: {
1978  StringRef Name = getBarrierName();
1979  if (!Name.empty())
1980  OS << "<barrier " << Name << ">";
1981  else
1982  OS << "<barrier invalid #" << getBarrier() << ">";
1983  break;
1984  }
1985  case k_Immediate:
1986  OS << *getImm();
1987  break;
1988  case k_ShiftedImm: {
1989  unsigned Shift = getShiftedImmShift();
1990  OS << "<shiftedimm ";
1991  OS << *getShiftedImmVal();
1992  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1993  break;
1994  }
1995  case k_CondCode:
1996  OS << "<condcode " << getCondCode() << ">";
1997  break;
1998  case k_VectorList: {
1999  OS << "<vectorlist ";
2000  unsigned Reg = getVectorListStart();
2001  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2002  OS << Reg + i << " ";
2003  OS << ">";
2004  break;
2005  }
2006  case k_VectorIndex:
2007  OS << "<vectorindex " << getVectorIndex() << ">";
2008  break;
2009  case k_SysReg:
2010  OS << "<sysreg: " << getSysReg() << '>';
2011  break;
2012  case k_Token:
2013  OS << "'" << getToken() << "'";
2014  break;
2015  case k_SysCR:
2016  OS << "c" << getSysCR();
2017  break;
2018  case k_Prefetch: {
2019  StringRef Name = getPrefetchName();
2020  if (!Name.empty())
2021  OS << "<prfop " << Name << ">";
2022  else
2023  OS << "<prfop invalid #" << getPrefetch() << ">";
2024  break;
2025  }
2026  case k_PSBHint:
2027  OS << getPSBHintName();
2028  break;
2029  case k_Register:
2030  OS << "<register " << getReg() << ">";
2031  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2032  break;
2033  LLVM_FALLTHROUGH;
2034  case k_ShiftExtend:
2035  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2036  << getShiftExtendAmount();
2037  if (!hasShiftExtendAmount())
2038  OS << "<imp>";
2039  OS << '>';
2040  break;
2041  }
2042 }
2043 
2044 /// @name Auto-generated Match Functions
2045 /// {
2046 
2047 static unsigned MatchRegisterName(StringRef Name);
2048 
2049 /// }
2050 
2051 static unsigned MatchNeonVectorRegName(StringRef Name) {
2052  return StringSwitch<unsigned>(Name.lower())
2053  .Case("v0", AArch64::Q0)
2054  .Case("v1", AArch64::Q1)
2055  .Case("v2", AArch64::Q2)
2056  .Case("v3", AArch64::Q3)
2057  .Case("v4", AArch64::Q4)
2058  .Case("v5", AArch64::Q5)
2059  .Case("v6", AArch64::Q6)
2060  .Case("v7", AArch64::Q7)
2061  .Case("v8", AArch64::Q8)
2062  .Case("v9", AArch64::Q9)
2063  .Case("v10", AArch64::Q10)
2064  .Case("v11", AArch64::Q11)
2065  .Case("v12", AArch64::Q12)
2066  .Case("v13", AArch64::Q13)
2067  .Case("v14", AArch64::Q14)
2068  .Case("v15", AArch64::Q15)
2069  .Case("v16", AArch64::Q16)
2070  .Case("v17", AArch64::Q17)
2071  .Case("v18", AArch64::Q18)
2072  .Case("v19", AArch64::Q19)
2073  .Case("v20", AArch64::Q20)
2074  .Case("v21", AArch64::Q21)
2075  .Case("v22", AArch64::Q22)
2076  .Case("v23", AArch64::Q23)
2077  .Case("v24", AArch64::Q24)
2078  .Case("v25", AArch64::Q25)
2079  .Case("v26", AArch64::Q26)
2080  .Case("v27", AArch64::Q27)
2081  .Case("v28", AArch64::Q28)
2082  .Case("v29", AArch64::Q29)
2083  .Case("v30", AArch64::Q30)
2084  .Case("v31", AArch64::Q31)
2085  .Default(0);
2086 }
2087 
2088 /// Returns an optional pair of (#elements, element-width) if Suffix
2089 /// is a valid vector kind. Where the number of elements in a vector
2090 /// or the vector width is implicit or explicitly unknown (but still a
2091 /// valid suffix kind), 0 is used.
2092 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2093  RegKind VectorKind) {
2094  std::pair<int, int> Res = {-1, -1};
2095 
2096  switch (VectorKind) {
2097  case RegKind::NeonVector:
2098  Res =
2099  StringSwitch<std::pair<int, int>>(Suffix.lower())
2100  .Case("", {0, 0})
2101  .Case(".1d", {1, 64})
2102  .Case(".1q", {1, 128})
2103  // '.2h' needed for fp16 scalar pairwise reductions
2104  .Case(".2h", {2, 16})
2105  .Case(".2s", {2, 32})
2106  .Case(".2d", {2, 64})
2107  // '.4b' is another special case for the ARMv8.2a dot product
2108  // operand
2109  .Case(".4b", {4, 8})
2110  .Case(".4h", {4, 16})
2111  .Case(".4s", {4, 32})
2112  .Case(".8b", {8, 8})
2113  .Case(".8h", {8, 16})
2114  .Case(".16b", {16, 8})
2115  // Accept the width neutral ones, too, for verbose syntax. If those
2116  // aren't used in the right places, the token operand won't match so
2117  // all will work out.
2118  .Case(".b", {0, 8})
2119  .Case(".h", {0, 16})
2120  .Case(".s", {0, 32})
2121  .Case(".d", {0, 64})
2122  .Default({-1, -1});
2123  break;
2124  case RegKind::SVEPredicateVector:
2125  case RegKind::SVEDataVector:
2126  Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2127  .Case("", {0, 0})
2128  .Case(".b", {0, 8})
2129  .Case(".h", {0, 16})
2130  .Case(".s", {0, 32})
2131  .Case(".d", {0, 64})
2132  .Case(".q", {0, 128})
2133  .Default({-1, -1});
2134  break;
2135  default:
2136  llvm_unreachable("Unsupported RegKind");
2137  }
2138 
2139  if (Res == std::make_pair(-1, -1))
2140  return Optional<std::pair<int, int>>();
2141 
2142  return Optional<std::pair<int, int>>(Res);
2143 }
2144 
2145 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2146  return parseVectorKind(Suffix, VectorKind).hasValue();
2147 }
2148 
2149 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2150  return StringSwitch<unsigned>(Name.lower())
2151  .Case("z0", AArch64::Z0)
2152  .Case("z1", AArch64::Z1)
2153  .Case("z2", AArch64::Z2)
2154  .Case("z3", AArch64::Z3)
2155  .Case("z4", AArch64::Z4)
2156  .Case("z5", AArch64::Z5)
2157  .Case("z6", AArch64::Z6)
2158  .Case("z7", AArch64::Z7)
2159  .Case("z8", AArch64::Z8)
2160  .Case("z9", AArch64::Z9)
2161  .Case("z10", AArch64::Z10)
2162  .Case("z11", AArch64::Z11)
2163  .Case("z12", AArch64::Z12)
2164  .Case("z13", AArch64::Z13)
2165  .Case("z14", AArch64::Z14)
2166  .Case("z15", AArch64::Z15)
2167  .Case("z16", AArch64::Z16)
2168  .Case("z17", AArch64::Z17)
2169  .Case("z18", AArch64::Z18)
2170  .Case("z19", AArch64::Z19)
2171  .Case("z20", AArch64::Z20)
2172  .Case("z21", AArch64::Z21)
2173  .Case("z22", AArch64::Z22)
2174  .Case("z23", AArch64::Z23)
2175  .Case("z24", AArch64::Z24)
2176  .Case("z25", AArch64::Z25)
2177  .Case("z26", AArch64::Z26)
2178  .Case("z27", AArch64::Z27)
2179  .Case("z28", AArch64::Z28)
2180  .Case("z29", AArch64::Z29)
2181  .Case("z30", AArch64::Z30)
2182  .Case("z31", AArch64::Z31)
2183  .Default(0);
2184 }
2185 
2186 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2187  return StringSwitch<unsigned>(Name.lower())
2188  .Case("p0", AArch64::P0)
2189  .Case("p1", AArch64::P1)
2190  .Case("p2", AArch64::P2)
2191  .Case("p3", AArch64::P3)
2192  .Case("p4", AArch64::P4)
2193  .Case("p5", AArch64::P5)
2194  .Case("p6", AArch64::P6)
2195  .Case("p7", AArch64::P7)
2196  .Case("p8", AArch64::P8)
2197  .Case("p9", AArch64::P9)
2198  .Case("p10", AArch64::P10)
2199  .Case("p11", AArch64::P11)
2200  .Case("p12", AArch64::P12)
2201  .Case("p13", AArch64::P13)
2202  .Case("p14", AArch64::P14)
2203  .Case("p15", AArch64::P15)
2204  .Default(0);
2205 }
2206 
2207 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2208  SMLoc &EndLoc) {
2209  StartLoc = getLoc();
2210  auto Res = tryParseScalarRegister(RegNo);
2211  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2212  return Res != MatchOperand_Success;
2213 }
2214 
2215 // Matches a register name or register alias previously defined by '.req'
2216 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2217  RegKind Kind) {
2218  unsigned RegNum = 0;
2219  if ((RegNum = matchSVEDataVectorRegName(Name)))
2220  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2221 
2222  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2223  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2224 
2225  if ((RegNum = MatchNeonVectorRegName(Name)))
2226  return Kind == RegKind::NeonVector ? RegNum : 0;
2227 
2228  // The parsed register must be of RegKind Scalar
2229  if ((RegNum = MatchRegisterName(Name)))
2230  return Kind == RegKind::Scalar ? RegNum : 0;
2231 
2232  if (!RegNum) {
2233  // Handle a few common aliases of registers.
2234  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2235  .Case("fp", AArch64::FP)
2236  .Case("lr", AArch64::LR)
2237  .Case("x31", AArch64::XZR)
2238  .Case("w31", AArch64::WZR)
2239  .Default(0))
2240  return Kind == RegKind::Scalar ? RegNum : 0;
2241 
2242  // Check for aliases registered via .req. Canonicalize to lower case.
2243  // That's more consistent since register names are case insensitive, and
2244  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2245  auto Entry = RegisterReqs.find(Name.lower());
2246  if (Entry == RegisterReqs.end())
2247  return 0;
2248 
2249  // set RegNum if the match is the right kind of register
2250  if (Kind == Entry->getValue().first)
2251  RegNum = Entry->getValue().second;
2252  }
2253  return RegNum;
2254 }
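 // Example (hypothetical alias name): after a "foo .req x4" directive has
 // populated RegisterReqs, matchRegisterNameAlias("foo", RegKind::Scalar)
 // would yield AArch64::X4, while a query with any other RegKind returns 0.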
2255 
2256 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2257 /// Identifier when called, and if it is a register name the token is eaten and
2258 /// the register number is returned.
2259 OperandMatchResultTy
2260 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2261  MCAsmParser &Parser = getParser();
2262  const AsmToken &Tok = Parser.getTok();
2263  if (Tok.isNot(AsmToken::Identifier))
2264  return MatchOperand_NoMatch;
2265 
2266  std::string lowerCase = Tok.getString().lower();
2267  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2268  if (Reg == 0)
2269  return MatchOperand_NoMatch;
2270 
2271  RegNum = Reg;
2272  Parser.Lex(); // Eat identifier token.
2273  return MatchOperand_Success;
2274 }
2275 
2276 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2277 OperandMatchResultTy
2278 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2279  MCAsmParser &Parser = getParser();
2280  SMLoc S = getLoc();
2281 
2282  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2283  Error(S, "Expected cN operand where 0 <= N <= 15");
2284  return MatchOperand_ParseFail;
2285  }
2286 
2287  StringRef Tok = Parser.getTok().getIdentifier();
2288  if (Tok[0] != 'c' && Tok[0] != 'C') {
2289  Error(S, "Expected cN operand where 0 <= N <= 15");
2290  return MatchOperand_ParseFail;
2291  }
2292 
2293  uint32_t CRNum;
2294  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2295  if (BadNum || CRNum > 15) {
2296  Error(S, "Expected cN operand where 0 <= N <= 15");
2297  return MatchOperand_ParseFail;
2298  }
2299 
2300  Parser.Lex(); // Eat identifier token.
2301  Operands.push_back(
2302  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2303  return MatchOperand_Success;
2304 }
2305 
2306 /// tryParsePrefetch - Try to parse a prefetch operand.
2307 template <bool IsSVEPrefetch>
2308 OperandMatchResultTy
2309 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2310  MCAsmParser &Parser = getParser();
2311  SMLoc S = getLoc();
2312  const AsmToken &Tok = Parser.getTok();
2313 
2314  auto LookupByName = [](StringRef N) {
2315  if (IsSVEPrefetch) {
2316  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2317  return Optional<unsigned>(Res->Encoding);
2318  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2319  return Optional<unsigned>(Res->Encoding);
2320  return Optional<unsigned>();
2321  };
2322 
2323  auto LookupByEncoding = [](unsigned E) {
2324  if (IsSVEPrefetch) {
2325  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2326  return Optional<StringRef>(Res->Name);
2327  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2328  return Optional<StringRef>(Res->Name);
2329  return Optional<StringRef>();
2330  };
2331  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2332 
2333  // Either an identifier for named values or a 5-bit immediate.
2334  // Eat optional hash.
2335  if (parseOptionalToken(AsmToken::Hash) ||
2336  Tok.is(AsmToken::Integer)) {
2337  const MCExpr *ImmVal;
2338  if (getParser().parseExpression(ImmVal))
2339  return MatchOperand_ParseFail;
2340 
2341  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2342  if (!MCE) {
2343  TokError("immediate value expected for prefetch operand");
2344  return MatchOperand_ParseFail;
2345  }
2346  unsigned prfop = MCE->getValue();
2347  if (prfop > MaxVal) {
2348  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2349  "] expected");
2350  return MatchOperand_ParseFail;
2351  }
2352 
2353  auto PRFM = LookupByEncoding(MCE->getValue());
2354  Operands.push_back(AArch64Operand::CreatePrefetch(
2355  prfop, PRFM.getValueOr(""), S, getContext()));
2356  return MatchOperand_Success;
2357  }
2358 
2359  if (Tok.isNot(AsmToken::Identifier)) {
2360  TokError("prefetch hint expected");
2361  return MatchOperand_ParseFail;
2362  }
2363 
2364  auto PRFM = LookupByName(Tok.getString());
2365  if (!PRFM) {
2366  TokError("prefetch hint expected");
2367  return MatchOperand_ParseFail;
2368  }
2369 
2370  Parser.Lex(); // Eat identifier token.
2371  Operands.push_back(AArch64Operand::CreatePrefetch(
2372  *PRFM, Tok.getString(), S, getContext()));
2373  return MatchOperand_Success;
2374 }
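 // For illustration: "prfm pldl1keep, [x0]" resolves the hint by name,
 // "prfm #5, [x0]" takes the raw-immediate path (a name is attached for
 // printing when one exists), and the SVE form rejects values above 15.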
2375 
2376 /// tryParsePSBHint - Try to parse a PSB hint operand, which maps onto the HINT instruction.
2377 OperandMatchResultTy
2378 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2379  MCAsmParser &Parser = getParser();
2380  SMLoc S = getLoc();
2381  const AsmToken &Tok = Parser.getTok();
2382  if (Tok.isNot(AsmToken::Identifier)) {
2383  TokError("invalid operand for instruction");
2384  return MatchOperand_ParseFail;
2385  }
2386 
2387  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2388  if (!PSB) {
2389  TokError("invalid operand for instruction");
2390  return MatchOperand_ParseFail;
2391  }
2392 
2393  Parser.Lex(); // Eat identifier token.
2394  Operands.push_back(AArch64Operand::CreatePSBHint(
2395  PSB->Encoding, Tok.getString(), S, getContext()));
2396  return MatchOperand_Success;
2397 }
2398 
2399 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2400 /// instruction.
2401 OperandMatchResultTy
2402 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2403  MCAsmParser &Parser = getParser();
2404  SMLoc S = getLoc();
2405  const MCExpr *Expr;
2406 
2407  if (Parser.getTok().is(AsmToken::Hash)) {
2408  Parser.Lex(); // Eat hash token.
2409  }
2410 
2411  if (parseSymbolicImmVal(Expr))
2412  return MatchOperand_ParseFail;
2413 
2414  AArch64MCExpr::VariantKind ELFRefKind;
2415  MCSymbolRefExpr::VariantKind DarwinRefKind;
2416  int64_t Addend;
2417  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2418  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2419  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2420  // No modifier was specified at all; this is the syntax for an ELF basic
2421  // ADRP relocation (unfortunately).
2422  Expr =
2423  AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2424  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2425  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2426  Addend != 0) {
2427  Error(S, "gotpage label reference not allowed an addend");
2428  return MatchOperand_ParseFail;
2429  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2430  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2431  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2432  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2433  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2434  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2435  // The operand must be an @page or @gotpage qualified symbolref.
2436  Error(S, "page or gotpage label reference expected");
2437  return MatchOperand_ParseFail;
2438  }
2439  }
2440 
2441  // We have either a label reference possibly with addend or an immediate. The
2442  // addend is a raw value here. The linker will adjust it to only reference the
2443  // page.
2444  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2445  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2446 
2447  return MatchOperand_Success;
2448 }
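 // Examples of accepted forms (assembly chosen for this note): "adrp x0, sym"
 // (bare ELF ADRP), "adrp x0, :got:sym", and Darwin "adrp x0, sym@PAGE";
 // something like "adrp x0, sym@GOTPAGE+4" is rejected above because gotpage
 // references may not carry an addend.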
2449 
2450 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2451 /// instruction.
2452 OperandMatchResultTy
2453 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2454  SMLoc S = getLoc();
2455  const MCExpr *Expr;
2456 
2457  // Leave anything with a bracket to the default for SVE
2458  if (getParser().getTok().is(AsmToken::LBrac))
2459  return MatchOperand_NoMatch;
2460 
2461  if (getParser().getTok().is(AsmToken::Hash))
2462  getParser().Lex(); // Eat hash token.
2463 
2464  if (parseSymbolicImmVal(Expr))
2465  return MatchOperand_ParseFail;
2466 
2467  AArch64MCExpr::VariantKind ELFRefKind;
2468  MCSymbolRefExpr::VariantKind DarwinRefKind;
2469  int64_t Addend;
2470  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2471  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2472  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2473  // No modifier was specified at all; this is the syntax for an ELF basic
2474  // ADR relocation (unfortunately).
2475  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2476  } else {
2477  Error(S, "unexpected adr label");
2478  return MatchOperand_ParseFail;
2479  }
2480  }
2481 
2482  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2483  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2484  return MatchOperand_Success;
2485 }
2486 
2487 /// tryParseFPImm - A floating point immediate expression operand.
2488 template<bool AddFPZeroAsLiteral>
2489 OperandMatchResultTy
2490 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2491  MCAsmParser &Parser = getParser();
2492  SMLoc S = getLoc();
2493 
2494  bool Hash = parseOptionalToken(AsmToken::Hash);
2495 
2496  // Handle negation, as that still comes through as a separate token.
2497  bool isNegative = parseOptionalToken(AsmToken::Minus);
2498 
2499  const AsmToken &Tok = Parser.getTok();
2500  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2501  if (!Hash)
2502  return MatchOperand_NoMatch;
2503  TokError("invalid floating point immediate");
2504  return MatchOperand_ParseFail;
2505  }
2506 
2507  // Parse hexadecimal representation.
2508  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2509  if (Tok.getIntVal() > 255 || isNegative) {
2510  TokError("encoded floating point value out of range");
2511  return MatchOperand_ParseFail;
2512  }
2513 
2514  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2515  Operands.push_back(
2516  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2517  } else {
2518  // Parse FP representation.
2519  APFloat RealVal(APFloat::IEEEdouble());
2520  auto Status =
2521  RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2522  if (isNegative)
2523  RealVal.changeSign();
2524 
2525  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2526  Operands.push_back(
2527  AArch64Operand::CreateToken("#0", false, S, getContext()));
2528  Operands.push_back(
2529  AArch64Operand::CreateToken(".0", false, S, getContext()));
2530  } else
2531  Operands.push_back(AArch64Operand::CreateFPImm(
2532  RealVal, Status == APFloat::opOK, S, getContext()));
2533  }
2534 
2535  Parser.Lex(); // Eat the token.
2536 
2537  return MatchOperand_Success;
2538 }
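 // For illustration: "fmov d0, #1.0" takes the decimal path, while
 // "fmov d0, #0x70" takes the hexadecimal imm8 path (0x70 being the FPImm
 // encoding of 1.0); a literal "#0.0" is split into the "#0" and ".0" tokens
 // built above when AddFPZeroAsLiteral is set.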
2539 
2540 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2541 /// a shift suffix, for example '#1, lsl #12'.
2542 OperandMatchResultTy
2543 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2544  MCAsmParser &Parser = getParser();
2545  SMLoc S = getLoc();
2546 
2547  if (Parser.getTok().is(AsmToken::Hash))
2548  Parser.Lex(); // Eat '#'
2549  else if (Parser.getTok().isNot(AsmToken::Integer))
2550  // The operand must start with '#' or be an integer; otherwise there is no match.
2551  return MatchOperand_NoMatch;
2552 
2553  const MCExpr *Imm;
2554  if (parseSymbolicImmVal(Imm))
2555  return MatchOperand_ParseFail;
2556  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2557  SMLoc E = Parser.getTok().getLoc();
2558  Operands.push_back(
2559  AArch64Operand::CreateImm(Imm, S, E, getContext()));
2560  return MatchOperand_Success;
2561  }
2562 
2563  // Eat ','
2564  Parser.Lex();
2565 
2566  // The optional operand must be "lsl #N" where N is non-negative.
2567  if (!Parser.getTok().is(AsmToken::Identifier) ||
2568  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2569  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2570  return MatchOperand_ParseFail;
2571  }
2572 
2573  // Eat 'lsl'
2574  Parser.Lex();
2575 
2576  parseOptionalToken(AsmToken::Hash);
2577 
2578  if (Parser.getTok().isNot(AsmToken::Integer)) {
2579  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2580  return MatchOperand_ParseFail;
2581  }
2582 
2583  int64_t ShiftAmount = Parser.getTok().getIntVal();
2584 
2585  if (ShiftAmount < 0) {
2586  Error(Parser.getTok().getLoc(), "positive shift amount required");
2587  return MatchOperand_ParseFail;
2588  }
2589  Parser.Lex(); // Eat the number
2590 
2591  // Just in case the optional lsl #0 is used for immediates other than zero.
2592  if (ShiftAmount == 0 && Imm != 0) {
2593  SMLoc E = Parser.getTok().getLoc();
2594  Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2595  return MatchOperand_Success;
2596  }
2597 
2598  SMLoc E = Parser.getTok().getLoc();
2599  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2600  S, E, getContext()));
2601  return MatchOperand_Success;
2602 }
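 // For illustration: "add x0, x1, #1, lsl #12" yields a shifted-immediate
 // operand with ShiftAmount 12, whereas "#5, lsl #0" collapses back to a
 // plain immediate via the special case above.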
2603 
2604 /// parseCondCodeString - Parse a Condition Code string.
2605 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2606  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2607  .Case("eq", AArch64CC::EQ)
2608  .Case("ne", AArch64CC::NE)
2609  .Case("cs", AArch64CC::HS)
2610  .Case("hs", AArch64CC::HS)
2611  .Case("cc", AArch64CC::LO)
2612  .Case("lo", AArch64CC::LO)
2613  .Case("mi", AArch64CC::MI)
2614  .Case("pl", AArch64CC::PL)
2615  .Case("vs", AArch64CC::VS)
2616  .Case("vc", AArch64CC::VC)
2617  .Case("hi", AArch64CC::HI)
2618  .Case("ls", AArch64CC::LS)
2619  .Case("ge", AArch64CC::GE)
2620  .Case("lt", AArch64CC::LT)
2621  .Case("gt", AArch64CC::GT)
2622  .Case("le", AArch64CC::LE)
2623  .Case("al", AArch64CC::AL)
2624  .Case("nv", AArch64CC::NV)
2625  .Default(AArch64CC::Invalid);
2626 
2627  if (CC == AArch64CC::Invalid &&
2628  getSTI().getFeatureBits()[AArch64::FeatureSVE])
2629  CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2630  .Case("none", AArch64CC::EQ)
2631  .Case("any", AArch64CC::NE)
2632  .Case("nlast", AArch64CC::HS)
2633  .Case("last", AArch64CC::LO)
2634  .Case("first", AArch64CC::MI)
2635  .Case("nfrst", AArch64CC::PL)
2636  .Case("pmore", AArch64CC::HI)
2637  .Case("plast", AArch64CC::LS)
2638  .Case("tcont", AArch64CC::GE)
2639  .Case("tstop", AArch64CC::LT)
2640  .Default(AArch64CC::Invalid);
2641 
2642  return CC;
2643 }
2644 
2645 /// parseCondCode - Parse a Condition Code operand.
2646 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2647  bool invertCondCode) {
2648  MCAsmParser &Parser = getParser();
2649  SMLoc S = getLoc();
2650  const AsmToken &Tok = Parser.getTok();
2651  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2652 
2653  StringRef Cond = Tok.getString();
2654  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2655  if (CC == AArch64CC::Invalid)
2656  return TokError("invalid condition code");
2657  Parser.Lex(); // Eat identifier token.
2658 
2659  if (invertCondCode) {
2660  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2661  return TokError("condition codes AL and NV are invalid for this instruction");
2662  CC = AArch64CC::getInvertedCondCode(CC);
2663  }
2664 
2665  Operands.push_back(
2666  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2667  return false;
2668 }
2669 
2670 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
2671 /// extend argument. Parse them if present.
2672 OperandMatchResultTy
2673 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2674  MCAsmParser &Parser = getParser();
2675  const AsmToken &Tok = Parser.getTok();
2676  std::string LowerID = Tok.getString().lower();
2677  AArch64_AM::ShiftExtendType ShOp =
2678  StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2679  .Case("lsl", AArch64_AM::LSL)
2680  .Case("lsr", AArch64_AM::LSR)
2681  .Case("asr", AArch64_AM::ASR)
2682  .Case("ror", AArch64_AM::ROR)
2683  .Case("msl", AArch64_AM::MSL)
2684  .Case("uxtb", AArch64_AM::UXTB)
2685  .Case("uxth", AArch64_AM::UXTH)
2686  .Case("uxtw", AArch64_AM::UXTW)
2687  .Case("uxtx", AArch64_AM::UXTX)
2688  .Case("sxtb", AArch64_AM::SXTB)
2689  .Case("sxth", AArch64_AM::SXTH)
2690  .Case("sxtw", AArch64_AM::SXTW)
2691  .Case("sxtx", AArch64_AM::SXTX)
2692  .Default(AArch64_AM::InvalidShiftExtend);
2693 
2694  if (ShOp == AArch64_AM::InvalidShiftExtend)
2695  return MatchOperand_NoMatch;
2696 
2697  SMLoc S = Tok.getLoc();
2698  Parser.Lex();
2699 
2700  bool Hash = parseOptionalToken(AsmToken::Hash);
2701 
2702  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2703  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2704  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2705  ShOp == AArch64_AM::MSL) {
2706  // We expect a number here.
2707  TokError("expected #imm after shift specifier");
2708  return MatchOperand_ParseFail;
2709  }
2710 
2711  // "extend" type operations don't need an immediate, #0 is implicit.
2712  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2713  Operands.push_back(
2714  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2715  return MatchOperand_Success;
2716  }
2717 
2718  // Make sure we do actually have a number, identifier or a parenthesized
2719  // expression.
2720  SMLoc E = Parser.getTok().getLoc();
2721  if (!Parser.getTok().is(AsmToken::Integer) &&
2722  !Parser.getTok().is(AsmToken::LParen) &&
2723  !Parser.getTok().is(AsmToken::Identifier)) {
2724  Error(E, "expected integer shift amount");
2725  return MatchOperand_ParseFail;
2726  }
2727 
2728  const MCExpr *ImmVal;
2729  if (getParser().parseExpression(ImmVal))
2730  return MatchOperand_ParseFail;
2731 
2732  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2733  if (!MCE) {
2734  Error(E, "expected constant '#imm' after shift specifier");
2735  return MatchOperand_ParseFail;
2736  }
2737 
2738  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2739  Operands.push_back(AArch64Operand::CreateShiftExtend(
2740  ShOp, MCE->getValue(), true, S, E, getContext()));
2741  return MatchOperand_Success;
2742 }
2743 
2744 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2745  if (FBS[AArch64::HasV8_1aOps])
2746  Str += "ARMv8.1a";
2747  else if (FBS[AArch64::HasV8_2aOps])
2748  Str += "ARMv8.2a";
2749  else if (FBS[AArch64::HasV8_3aOps])
2750  Str += "ARMv8.3a";
2751  else if (FBS[AArch64::HasV8_4aOps])
2752  Str += "ARMv8.4a";
2753  else
2754  Str += "(unknown)";
2755 }
2756 
2757 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2758  SMLoc S) {
2759  const uint16_t Op2 = Encoding & 7;
2760  const uint16_t Cm = (Encoding & 0x78) >> 3;
2761  const uint16_t Cn = (Encoding & 0x780) >> 7;
2762  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2763 
2764  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2765 
2766  Operands.push_back(
2767  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2768  Operands.push_back(
2769  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2770  Operands.push_back(
2771  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2772  Expr = MCConstantExpr::create(Op2, getContext());
2773  Operands.push_back(
2774  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2775 }
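 // Worked example (encoding values per the ARMv8 system instruction space):
 // IC IALLUIS has op1=0, Cn=7, Cm=1, op2=0, so Encoding =
 // (0<<11)|(7<<7)|(1<<3)|0 = 0x388, which this helper expands back into the
 // operands of "sys #0, c7, c1, #0".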
2776 
2777 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2778 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2779 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2780  OperandVector &Operands) {
2781  if (Name.find('.') != StringRef::npos)
2782  return TokError("invalid operand");
2783 
2784  Mnemonic = Name;
2785  Operands.push_back(
2786  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2787 
2788  MCAsmParser &Parser = getParser();
2789  const AsmToken &Tok = Parser.getTok();
2790  StringRef Op = Tok.getString();
2791  SMLoc S = Tok.getLoc();
2792 
2793  if (Mnemonic == "ic") {
2794  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2795  if (!IC)
2796  return TokError("invalid operand for IC instruction");
2797  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2798  std::string Str("IC " + std::string(IC->Name) + " requires ");
2799  setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2800  return TokError(Str.c_str());
2801  }
2802  createSysAlias(IC->Encoding, Operands, S);
2803  } else if (Mnemonic == "dc") {
2804  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2805  if (!DC)
2806  return TokError("invalid operand for DC instruction");
2807  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2808  std::string Str("DC " + std::string(DC->Name) + " requires ");
2809  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2810  return TokError(Str.c_str());
2811  }
2812  createSysAlias(DC->Encoding, Operands, S);
2813  } else if (Mnemonic == "at") {
2814  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2815  if (!AT)
2816  return TokError("invalid operand for AT instruction");
2817  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2818  std::string Str("AT " + std::string(AT->Name) + " requires ");
2819  setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2820  return TokError(Str.c_str());
2821  }
2822  createSysAlias(AT->Encoding, Operands, S);
2823  } else if (Mnemonic == "tlbi") {
2824  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2825  if (!TLBI)
2826  return TokError("invalid operand for TLBI instruction");
2827  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2828  std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2829  setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2830  return TokError(Str.c_str());
2831  }
2832  createSysAlias(TLBI->Encoding, Operands, S);
2833  }
2834 
2835  Parser.Lex(); // Eat operand.
2836 
2837  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2838  bool HasRegister = false;
2839 
2840  // Check for the optional register operand.
2841  if (parseOptionalToken(AsmToken::Comma)) {
2842  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2843  return TokError("expected register operand");
2844  HasRegister = true;
2845  }
2846 
2847  if (ExpectRegister && !HasRegister)
2848  return TokError("specified " + Mnemonic + " op requires a register");
2849  else if (!ExpectRegister && HasRegister)
2850  return TokError("specified " + Mnemonic + " op does not use a register");
2851 
2852  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2853  return true;
2854 
2855  return false;
2856 }
2857 
2858 OperandMatchResultTy
2859 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2860  MCAsmParser &Parser = getParser();
2861  const AsmToken &Tok = Parser.getTok();
2862 
2863  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2864  TokError("'csync' operand expected");
2865  return MatchOperand_ParseFail;
2866  // Can be either a #imm style literal or an option name
2867  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2868  // Immediate operand.
2869  const MCExpr *ImmVal;
2870  SMLoc ExprLoc = getLoc();
2871  if (getParser().parseExpression(ImmVal))
2872  return MatchOperand_ParseFail;
2873  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2874  if (!MCE) {
2875  Error(ExprLoc, "immediate value expected for barrier operand");
2876  return MatchOperand_ParseFail;
2877  }
2878  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2879  Error(ExprLoc, "barrier operand out of range");
2880  return MatchOperand_ParseFail;
2881  }
2882  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2883  Operands.push_back(AArch64Operand::CreateBarrier(
2884  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2885  return MatchOperand_Success;
2886  }
2887 
2888  if (Tok.isNot(AsmToken::Identifier)) {
2889  TokError("invalid operand for instruction");
2890  return MatchOperand_ParseFail;
2891  }
2892 
2893  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
2894  // The only valid named option for ISB is 'sy'
2895  auto DB = AArch64DB::lookupDBByName(Tok.getString());
2896  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2897  TokError("'sy' or #imm operand expected");
2898  return MatchOperand_ParseFail;
2899  // The only valid named option for TSB is 'csync'
2900  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
2901  TokError("'csync' operand expected");
2902  return MatchOperand_ParseFail;
2903  } else if (!DB && !TSB) {
2904  TokError("invalid barrier option name");
2905  return MatchOperand_ParseFail;
2906  }
2907 
2908  Operands.push_back(AArch64Operand::CreateBarrier(
2909  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
2910  Parser.Lex(); // Consume the option
2911 
2912  return MatchOperand_Success;
2913 }
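 // For illustration: "dsb ish" matches a named barrier option, "dmb #11"
 // takes the immediate path (11 being the ISH encoding), "isb" accepts only
 // "sy" or an immediate, and "tsb" accepts only "csync".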
2914 
2915 OperandMatchResultTy
2916 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2917  MCAsmParser &Parser = getParser();
2918  const AsmToken &Tok = Parser.getTok();
2919 
2920  if (Tok.isNot(AsmToken::Identifier))
2921  return MatchOperand_NoMatch;
2922 
2923  int MRSReg, MSRReg;
2924  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2925  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2926  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2927  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2928  } else
2929  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2930 
2931  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2932  unsigned PStateImm = -1;
2933  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2934  PStateImm = PState->Encoding;
2935 
2936  Operands.push_back(
2937  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2938  PStateImm, getContext()));
2939  Parser.Lex(); // Eat identifier
2940 
2941  return MatchOperand_Success;
2942 }
2943 
2944 /// tryParseNeonVectorRegister - Parse a vector register operand.
2945 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2946  MCAsmParser &Parser = getParser();
2947  if (Parser.getTok().isNot(AsmToken::Identifier))
2948  return true;
2949 
2950  SMLoc S = getLoc();
2951  // Check for a vector register specifier first.
2952  StringRef Kind;
2953  unsigned Reg;
2954  OperandMatchResultTy Res =
2955  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
2956  if (Res != MatchOperand_Success)
2957  return true;
2958 
2959  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
2960  if (!KindRes)
2961  return true;
2962 
2963  unsigned ElementWidth = KindRes->second;
2964  Operands.push_back(
2965  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
2966  S, getLoc(), getContext()));
2967 
2968  // If there was an explicit qualifier, that goes on as a literal text
2969  // operand.
2970  if (!Kind.empty())
2971  Operands.push_back(
2972  AArch64Operand::CreateToken(Kind, false, S, getContext()));
2973 
2974  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
2975 }
2976 
2977 OperandMatchResultTy
2978 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
2979  SMLoc SIdx = getLoc();
2980  if (parseOptionalToken(AsmToken::LBrac)) {
2981  const MCExpr *ImmVal;
2982  if (getParser().parseExpression(ImmVal))
2983  return MatchOperand_NoMatch;
2984  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2985  if (!MCE) {
2986  TokError("immediate value expected for vector index");
2987  return MatchOperand_ParseFail;
2988  }
2989 
2990  SMLoc E = getLoc();
2991 
2992  if (parseToken(AsmToken::RBrac, "']' expected"))
2993  return MatchOperand_ParseFail;
2994 
2995  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2996  E, getContext()));
2997  return MatchOperand_Success;
2998  }
2999 
3000  return MatchOperand_NoMatch;
3001 }
3002 
3003 // tryParseVectorRegister - Try to parse a vector register name with
3004 // optional kind specifier. If it is a register specifier, eat the token
3005 // and return it.
3006 OperandMatchResultTy
3007 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3008  RegKind MatchKind) {
3009  MCAsmParser &Parser = getParser();
3010  const AsmToken &Tok = Parser.getTok();
3011 
3012  if (Tok.isNot(AsmToken::Identifier))
3013  return MatchOperand_NoMatch;
3014 
3015  StringRef Name = Tok.getString();
3016  // If there is a kind specifier, it's separated from the register name by
3017  // a '.'.
3018  size_t Start = 0, Next = Name.find('.');
3019  StringRef Head = Name.slice(Start, Next);
3020  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3021 
3022  if (RegNum) {
3023  if (Next != StringRef::npos) {
3024  Kind = Name.slice(Next, StringRef::npos);
3025  if (!isValidVectorKind(Kind, MatchKind)) {
3026  TokError("invalid vector kind qualifier");
3027  return MatchOperand_ParseFail;
3028  }
3029  }
3030  Parser.Lex(); // Eat the register token.
3031 
3032  Reg = RegNum;
3033  return MatchOperand_Success;
3034  }
3035 
3036  return MatchOperand_NoMatch;
3037 }
3038 
3039 /// tryParseSVEPredicateVector - Parse an SVE predicate register operand.
3040 OperandMatchResultTy
3041 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3042  // Check for a SVE predicate register specifier first.
3043  const SMLoc S = getLoc();
3044  StringRef Kind;
3045  unsigned RegNum;
3046  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3047  if (Res != MatchOperand_Success)
3048  return Res;
3049 
3050  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3051  if (!KindRes)
3052  return MatchOperand_NoMatch;
3053 
3054  unsigned ElementWidth = KindRes->second;
3055  Operands.push_back(AArch64Operand::CreateVectorReg(
3056  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3057  getLoc(), getContext()));
3058 
3059  // Not all predicates are followed by a '/m' or '/z'.
3060  MCAsmParser &Parser = getParser();
3061  if (Parser.getTok().isNot(AsmToken::Slash))
3062  return MatchOperand_Success;
3063 
3064  // But when they do they shouldn't have an element type suffix.
3065  if (!Kind.empty()) {
3066  Error(S, "not expecting size suffix");
3067  return MatchOperand_ParseFail;
3068  }
3069 
3070  // Add a literal slash as operand
3071  Operands.push_back(
3072  AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3073 
3074  Parser.Lex(); // Eat the slash.
3075 
3076  // Zeroing or merging?
3077  auto Pred = Parser.getTok().getString().lower();
3078  if (Pred != "z" && Pred != "m") {
3079  Error(getLoc(), "expecting 'm' or 'z' predication");
3080  return MatchOperand_ParseFail;
3081  }
3082 
3083  // Add zero/merge token.
3084  const char *ZM = Pred == "z" ? "z" : "m";
3085  Operands.push_back(
3086  AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3087 
3088  Parser.Lex(); // Eat zero/merge token.
3089  return MatchOperand_Success;
3090 }
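 // For illustration: "p0/z" becomes the predicate register plus literal "/"
 // and "z" tokens, while "p0.b/z" is rejected above because a size suffix may
 // not precede the /m or /z qualifier.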
3091 
3092 /// parseRegister - Parse a register operand.
3093 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3094  // Try for a Neon vector register.
3095  if (!tryParseNeonVectorRegister(Operands))
3096  return false;
3097 
3098  // Otherwise try for a scalar register.
3099  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3100  return false;
3101 
3102  return true;
3103 }
3104 
3105 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3106  MCAsmParser &Parser = getParser();
3107  bool HasELFModifier = false;
3108  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
3109 
3110  if (parseOptionalToken(AsmToken::Colon)) {
3111  HasELFModifier = true;
3112 
3113  if (Parser.getTok().isNot(AsmToken::Identifier))
3114  return TokError("expect relocation specifier in operand after ':'");
3115 
3116  std::string LowerCase = Parser.getTok().getIdentifier().lower();
3117  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3118  .Case("lo12", AArch64MCExpr::VK_LO12)
3119  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3120  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3121  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3122  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3123  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3124  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3125  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3126  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3127  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3128  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3129  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3130  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3131  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3132  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3133  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3134  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3135  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3136  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3137  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3138  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3139  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3140  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3141  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3142  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3143  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3144  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3145  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3146  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3147  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3148  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3149  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3150  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3151  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3152  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3153  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3154  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3155  .Default(AArch64MCExpr::VK_INVALID);
3156 
3157  if (RefKind == AArch64MCExpr::VK_INVALID)
3158  return TokError("expect relocation specifier in operand after ':'");
3159 
3160  Parser.Lex(); // Eat identifier
3161 
3162  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3163  return true;
3164  }
3165 
3166  if (getParser().parseExpression(ImmVal))
3167  return true;
3168 
3169  if (HasELFModifier)
3170  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3171 
3172  return false;
3173 }
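 // For illustration: "add x0, x0, :lo12:var" wraps the symbol expression in
 // VK_LO12 here, and "movz x0, #:abs_g1:sym" produces VK_ABS_G1.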
3174 
3175 template <RegKind VectorKind>
3176 OperandMatchResultTy
3177 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3178  bool ExpectMatch) {
3179  MCAsmParser &Parser = getParser();
3180  if (!Parser.getTok().is(AsmToken::LCurly))
3181  return MatchOperand_NoMatch;
3182 
3183  // Wrapper around parse function
3184  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3185  bool NoMatchIsError) {
3186  auto RegTok = Parser.getTok();
3187  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3188  if (ParseRes == MatchOperand_Success) {
3189  if (parseVectorKind(Kind, VectorKind))
3190  return ParseRes;
3191  llvm_unreachable("Expected a valid vector kind");
3192  }
3193 
3194  if (RegTok.isNot(AsmToken::Identifier) ||
3195  ParseRes == MatchOperand_ParseFail ||
3196  (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3197  Error(Loc, "vector register expected");
3198  return MatchOperand_ParseFail;
3199  }
3200 
3201  return MatchOperand_NoMatch;
3202  };
3203 
3204  SMLoc S = getLoc();
3205  auto LCurly = Parser.getTok();
3206  Parser.Lex(); // Eat left bracket token.
3207 
3208  StringRef Kind;
3209  unsigned FirstReg;
3210  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3211 
3212  // Put back the original left bracket if there was no match, so that
3213  // different types of list-operands can be matched (e.g. SVE, Neon).
3214  if (ParseRes == MatchOperand_NoMatch)
3215  Parser.getLexer().UnLex(LCurly);
3216 
3217  if (ParseRes != MatchOperand_Success)
3218  return ParseRes;
3219 
3220  int64_t PrevReg = FirstReg;
3221  unsigned Count = 1;
3222 
3223  if (parseOptionalToken(AsmToken::Minus)) {
3224  SMLoc Loc = getLoc();
3225  StringRef NextKind;
3226 
3227  unsigned Reg;
3228  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3229  if (ParseRes != MatchOperand_Success)
3230  return ParseRes;
3231 
3232  // Any kind suffix must match on all regs in the list.
3233  if (Kind != NextKind) {
3234  Error(Loc, "mismatched register size suffix");
3235  return MatchOperand_ParseFail;
3236  }
3237 
3238  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3239 
3240  if (Space == 0 || Space > 3) {
3241  Error(Loc, "invalid number of vectors");
3242  return MatchOperand_ParseFail;
3243  }
3244 
3245  Count += Space;
3246  }
3247  else {
3248  while (parseOptionalToken(AsmToken::Comma)) {
3249  SMLoc Loc = getLoc();
3250  StringRef NextKind;
3251  unsigned Reg;
3252  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3253  if (ParseRes != MatchOperand_Success)
3254  return ParseRes;
3255 
3256  // Any kind suffix must match on all regs in the list.
3257  if (Kind != NextKind) {
3258  Error(Loc, "mismatched register size suffix");
3259  return MatchOperand_ParseFail;
3260  }
3261 
3262  // Registers must be incremental (with wraparound at 31)
3263  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3264  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3265  Error(Loc, "registers must be sequential");
3266  return MatchOperand_ParseFail;
3267  }
3268 
3269  PrevReg = Reg;
3270  ++Count;
3271  }
3272  }
3273 
3274  if (parseToken(AsmToken::RCurly, "'}' expected"))
3275  return MatchOperand_ParseFail;
3276 
3277  if (Count > 4) {
3278  Error(S, "invalid number of vectors");
3279  return MatchOperand_ParseFail;
3280  }
3281 
3282  unsigned NumElements = 0;
3283  unsigned ElementWidth = 0;
3284  if (!Kind.empty()) {
3285  if (const auto &VK = parseVectorKind(Kind, VectorKind))
3286  std::tie(NumElements, ElementWidth) = *VK;
3287  }
3288 
3289  Operands.push_back(AArch64Operand::CreateVectorList(
3290  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3291  getContext()));
3292 
3293  return MatchOperand_Success;
3294 }
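 // For illustration: "{v0.8b, v1.8b, v2.8b}" gives Count=3, and the range
 // form wraps at 31, so "{v31.4s-v2.4s}" denotes v31, v0, v1, v2 (Space is
 // computed modulo 32, giving Count=4).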
3295 
3296 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3297 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3298  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3299  if (ParseRes != MatchOperand_Success)
3300  return true;
3301 
3302  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3303 }
3304 
3305 OperandMatchResultTy
3306 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3307  SMLoc StartLoc = getLoc();
3308 
3309  unsigned RegNum;
3310  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3311  if (Res != MatchOperand_Success)
3312  return Res;
3313 
3314  if (!parseOptionalToken(AsmToken::Comma)) {
3315  Operands.push_back(AArch64Operand::CreateReg(
3316  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3317  return MatchOperand_Success;
3318  }
3319 
3320  parseOptionalToken(AsmToken::Hash);
3321 
3322  if (getParser().getTok().isNot(AsmToken::Integer)) {
3323  Error(getLoc(), "index must be absent or #0");
3324  return MatchOperand_ParseFail;
3325  }
3326 
3327  const MCExpr *ImmVal;
3328  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3329  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3330  Error(getLoc(), "index must be absent or #0");
3331  return MatchOperand_ParseFail;
3332  }
3333 
3334  Operands.push_back(AArch64Operand::CreateReg(
3335  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3336  return MatchOperand_Success;
3337 }
3338 
3339 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3340 OperandMatchResultTy
3341 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3342  SMLoc StartLoc = getLoc();
3343 
3344  unsigned RegNum;
3345  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3346  if (Res != MatchOperand_Success)
3347  return Res;
3348 
3349  // No shift/extend is the default.
3350  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3351  Operands.push_back(AArch64Operand::CreateReg(
3352  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3353  return MatchOperand_Success;
3354  }
3355 
3356  // Eat the comma
3357  getParser().Lex();
3358 
3359  // Match the shift
3360  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3361  Res = tryParseOptionalShiftExtend(ExtOpnd);
3362  if (Res != MatchOperand_Success)
3363  return Res;
3364 
3365  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3366  Operands.push_back(AArch64Operand::CreateReg(
3367  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3368  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3369  Ext->hasShiftExtendAmount()));
3370 
3371  return MatchOperand_Success;
3372 }
3373 
3374 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3375  MCAsmParser &Parser = getParser();
3376 
3377  // Some SVE instructions have a decoration after the immediate, i.e.
3378  // "mul vl". We parse them here and add tokens, which must be present in the
3379  // asm string in the tablegen instruction.
3380  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3381  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3382  if (!Parser.getTok().getString().equals_lower("mul") ||
3383  !(NextIsVL || NextIsHash))
3384  return true;
3385 
3386  Operands.push_back(
3387  AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3388  Parser.Lex(); // Eat the "mul"
3389 
3390  if (NextIsVL) {
3391  Operands.push_back(
3392  AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3393  Parser.Lex(); // Eat the "vl"
3394  return false;
3395  }
3396 
3397  if (NextIsHash) {
3398  Parser.Lex(); // Eat the #
3399  SMLoc S = getLoc();
3400 
3401  // Parse immediate operand.
3402  const MCExpr *ImmVal;
3403  if (!Parser.parseExpression(ImmVal))
3404  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3405  Operands.push_back(AArch64Operand::CreateImm(
3406  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3407  getContext()));
3408  return false; // Success; the immediate operand was added.
3409  }
3410  }
3411 
3412  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3413 }
3414 
3415 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
3416 /// the operand regardless of the mnemonic.
3417 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3418  bool invertCondCode) {
3419  MCAsmParser &Parser = getParser();
3420 
3421  OperandMatchResultTy ResTy =
3422  MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3423 
3424  // Check if the current operand has a custom associated parser; if so, try
3425  // to custom parse the operand, or fall back to the general approach.
3426  if (ResTy == MatchOperand_Success)
3427  return false;
3428  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3429  // there was a match, but an error occurred, in which case just return that
3430  // the operand parsing failed.
3431  if (ResTy == MatchOperand_ParseFail)
3432  return true;
3433 
3434  // Nothing custom, so do general case parsing.
3435  SMLoc S, E;
3436  switch (getLexer().getKind()) {
3437  default: {
3438  SMLoc S = getLoc();
3439  const MCExpr *Expr;
3440  if (parseSymbolicImmVal(Expr))
3441  return Error(S, "invalid operand");
3442 
3443  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3444  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3445  return false;
3446  }
3447  case AsmToken::LBrac: {
3448  SMLoc Loc = Parser.getTok().getLoc();
3449  Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3450  getContext()));
3451  Parser.Lex(); // Eat '['
3452 
3453  // There's no comma after a '[', so we can parse the next operand
3454  // immediately.
3455  return parseOperand(Operands, false, false);
3456  }
3457  case AsmToken::LCurly:
3458  return parseNeonVectorList(Operands);
3459  case AsmToken::Identifier: {
3460  // If we're expecting a Condition Code operand, then just parse that.
3461  if (isCondCode)
3462  return parseCondCode(Operands, invertCondCode);
3463 
3464  // If it's a register name, parse it.
3465  if (!parseRegister(Operands))
3466  return false;
3467 
3468  // See if this is a "mul vl" decoration or "mul #<int>" operand used
3469  // by SVE instructions.
3470  if (!parseOptionalMulOperand(Operands))
3471  return false;
3472 
3473  // This could be an optional "shift" or "extend" operand.
3474  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3475  // We can only continue if no tokens were eaten.
3476  if (GotShift != MatchOperand_NoMatch)
3477  return GotShift;
3478 
3479  // This was not a register so parse other operands that start with an
3480  // identifier (like labels) as expressions and create them as immediates.
3481  const MCExpr *IdVal;
3482  S = getLoc();
3483  if (getParser().parseExpression(IdVal))
3484  return true;
3485  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3486  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3487  return false;
3488  }
3489  case AsmToken::Integer:
3490  case AsmToken::Real:
3491  case AsmToken::Hash: {
3492  // #42 -> immediate.
3493  S = getLoc();
3494 
3495  parseOptionalToken(AsmToken::Hash);
3496 
3497  // Parse a negative sign
3498  bool isNegative = false;
3499  if (Parser.getTok().is(AsmToken::Minus)) {
3500  isNegative = true;
3501  // We need to consume this token only when we have a Real, otherwise
3502  // we let parseSymbolicImmVal take care of it
3503  if (Parser.getLexer().peekTok().is(AsmToken::Real))
3504  Parser.Lex();
3505  }
3506 
3507  // The only Real that should come through here is a literal #0.0 for
3508  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3509  // so convert the value.
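  // e.g. for "fcmp s0, #0.0" the literal is split into the raw tokens "#0"
  // and ".0" below, which is the form the matcher expects.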
3510  const AsmToken &Tok = Parser.getTok();
3511  if (Tok.is(AsmToken::Real)) {
3512  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3513  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3514  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3515  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3516  Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3517  return TokError("unexpected floating point literal");
3518  else if (IntVal != 0 || isNegative)
3519  return TokError("expected floating-point constant #0.0");
3520  Parser.Lex(); // Eat the token.
3521 
3522  Operands.push_back(
3523  AArch64Operand::CreateToken("#0", false, S, getContext()));
3524  Operands.push_back(
3525  AArch64Operand::CreateToken(".0", false, S, getContext()));
3526  return false;
3527  }
3528 
3529  const MCExpr *ImmVal;
3530  if (parseSymbolicImmVal(ImmVal))
3531  return true;
3532 
3533  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3534  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3535  return false;
3536  }
3537  case AsmToken::Equal: {
3538  SMLoc Loc = getLoc();
3539  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val)
3540  return TokError("unexpected token in operand");
3541  Parser.Lex(); // Eat '='
3542  const MCExpr *SubExprVal;
3543  if (getParser().parseExpression(SubExprVal))
3544  return true;
3545 
3546  if (Operands.size() < 2 ||
3547  !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3548  return Error(Loc, "Only valid when first operand is register");
3549 
3550  bool IsXReg =
3551  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3552  Operands[1]->getReg());
3553 
3554  MCContext& Ctx = getContext();
3555  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3556  // If the operand is an immediate that fits in a mov, replace ldr with mov.
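  // e.g. "ldr x0, =0x10000" becomes "movz x0, #1, lsl #16", while
  // "ldr x0, =label" falls through to the constant-pool path below.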
3557  if (isa<MCConstantExpr>(SubExprVal)) {
3558  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3559  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3560  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3561  ShiftAmt += 16;
3562  Imm >>= 16;
3563  }
3564  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3565  Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3566  Operands.push_back(AArch64Operand::CreateImm(
3567  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3568  if (ShiftAmt)
3569  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3570  ShiftAmt, true, S, E, Ctx));
3571  return false;
3572  }
3573  APInt Simm = APInt(64, Imm << ShiftAmt);
3574  // check if the immediate is an unsigned or signed 32-bit int for W regs
3575  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3576  return Error(Loc, "Immediate too large for register");
3577  }
3578  // If it is a label or an imm that cannot fit in a movz, put it into CP.
3579  const MCExpr *CPLoc =
3580  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3581  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3582  return false;
3583  }
3584  }
3585 }
3586 
3587 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3588  const MCParsedAsmOperand &Op2) const {
3589  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3590  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3591  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3592  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3593  return MCTargetAsmParser::regsEqual(Op1, Op2);
3594 
3595  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3596  "Testing equality of non-scalar registers not supported");
3597 
3598  // Check if the registers match via their sub/super register classes.
3599  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3600  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3601  if (AOp1.getRegEqualityTy() == EqualsSubReg)
3602  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3603  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3604  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3605  if (AOp2.getRegEqualityTy() == EqualsSubReg)
3606  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3607 
3608  return false;
3609 }
3610 
3611 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3612 /// operands.
3613 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3614  StringRef Name, SMLoc NameLoc,
3615  OperandVector &Operands) {
3616  MCAsmParser &Parser = getParser();
3617  Name = StringSwitch<StringRef>(Name.lower())
3618  .Case("beq", "b.eq")
3619  .Case("bne", "b.ne")
3620  .Case("bhs", "b.hs")
3621  .Case("bcs", "b.cs")
3622  .Case("blo", "b.lo")
3623  .Case("bcc", "b.cc")
3624  .Case("bmi", "b.mi")
3625  .Case("bpl", "b.pl")
3626  .Case("bvs", "b.vs")
3627  .Case("bvc", "b.vc")
3628  .Case("bhi", "b.hi")
3629  .Case("bls", "b.ls")
3630  .Case("bge", "b.ge")
3631  .Case("blt", "b.lt")
3632  .Case("bgt", "b.gt")
3633  .Case("ble", "b.le")
3634  .Case("bal", "b.al")
3635  .Case("bnv", "b.nv")
3636  .Default(Name);
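  // e.g. the legacy spelling "beq lbl" is canonicalized to "b.eq lbl" here,
  // before the mnemonic is split on '.' below.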
3637 
3638  // First check for the AArch64-specific .req directive.
3639  if (Parser.getTok().is(AsmToken::Identifier) &&
3640  Parser.getTok().getIdentifier() == ".req") {
3641  parseDirectiveReq(Name, NameLoc);
3642  // We always return 'error' for this, as we're done with this
3643  // statement and don't need to match the 'instruction'.
3644  return true;
3645  }
3646 
3647  // Create the leading tokens for the mnemonic, split by '.' characters.
3648  size_t Start = 0, Next = Name.find('.');
3649  StringRef Head = Name.slice(Start, Next);
3650 
3651  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3652  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3653  return parseSysAlias(Head, NameLoc, Operands);
3654 
3655  Operands.push_back(
3656  AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3657  Mnemonic = Head;
3658 
3659  // Handle condition codes for a branch mnemonic
3660  if (Head == "b" && Next != StringRef::npos) {
3661  Start = Next;
3662  Next = Name.find('.', Start + 1);
3663  Head = Name.slice(Start + 1, Next);
3664 
3665  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3666  (Head.data() - Name.data()));
3667  AArch64CC::CondCode CC = parseCondCodeString(Head);
3668  if (CC == AArch64CC::Invalid)
3669  return Error(SuffixLoc, "invalid condition code");
3670  Operands.push_back(
3671  AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3672  Operands.push_back(
3673  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3674  }
3675 
3676  // Add the remaining tokens in the mnemonic.
3677  while (Next != StringRef::npos) {
3678  Start = Next;
3679  Next = Name.find('.', Start + 1);
3680  Head = Name.slice(Start, Next);
3681  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3682  (Head.data() - Name.data()) + 1);
3683  Operands.push_back(
3684  AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3685  }
3686 
3687  // Conditional compare instructions have a Condition Code operand, which needs
3688  // to be parsed and an immediate operand created.
3689  bool condCodeFourthOperand =
3690  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3691  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3692  Head == "csinc" || Head == "csinv" || Head == "csneg");
3693 
3694  // These instructions are aliases of some of the conditional select
3695  // instructions. However, the condition code is inverted in the aliased
3696  // instruction.
3697  //
3698  // FIXME: Is this the correct way to handle these? Or should the parser
3699  // generate the aliased instructions directly?
3700  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3701  bool condCodeThirdOperand =
3702  (Head == "cinc" || Head == "cinv" || Head == "cneg");
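  // e.g. "ccmp w0, w1, #4, eq" takes a condition code as its 4th operand,
  // "cset w0, ne" as its 2nd and "cinc w0, w1, lt" as its 3rd; the last two
  // are matched with the condition inverted, e.g. "csinc w0, w1, w1, ge".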
3703 
3704  // Read the remaining operands.
3705  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3706  // Read the first operand.
3707  if (parseOperand(Operands, false, false)) {
3708  return true;
3709  }
3710 
3711  unsigned N = 2;
3712  while (parseOptionalToken(AsmToken::Comma)) {
3713  // Parse and remember the operand.
3714  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3715  (N == 3 && condCodeThirdOperand) ||
3716  (N == 2 && condCodeSecondOperand),
3717  condCodeSecondOperand || condCodeThirdOperand)) {
3718  return true;
3719  }
3720 
3721  // After successfully parsing some operands there are two special cases to
3722  // consider (i.e. notional operands not separated by commas). Both are due
3723  // to memory specifiers:
3724  // + An RBrac will end an address for load/store/prefetch
3725  // + An '!' will indicate a pre-indexed operation.
3726  //
3727  // It's someone else's responsibility to make sure these tokens are sane
3728  // in the given context!
3729 
3730  SMLoc RLoc = Parser.getTok().getLoc();
3731  if (parseOptionalToken(AsmToken::RBrac))
3732  Operands.push_back(
3733  AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3734  SMLoc ELoc = Parser.getTok().getLoc();
3735  if (parseOptionalToken(AsmToken::Exclaim))
3736  Operands.push_back(
3737  AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3738 
3739  ++N;
3740  }
3741  }
3742 
3743  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3744  return true;
3745 
3746  return false;
3747 }
3748 
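// Returns true when Reg is one of the B/H/S/D/Q (or Z) register aliases of
// the SVE vector register ZReg, e.g. both d3 and q3 overlap z3.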
3749 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3750  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3751  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3752  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3753  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3754  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3755  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3756  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3757 }
3758 
3759 // FIXME: This entire function is a giant hack to provide us with decent
3760 // operand range validation/diagnostics until TableGen/MC can be extended
3761 // to support autogeneration of this kind of validation.
3762 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3763  SmallVectorImpl<SMLoc> &Loc) {
3764  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3765  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3766 
3767  // A prefix only applies to the instruction following it. Here we extract
3768  // prefix information for the next instruction before validating the current
3769  // one so that in the case of failure we don't erroneously continue using the
3770  // current prefix.
3771  PrefixInfo Prefix = NextPrefix;
3772  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3773 
3774  // Before validating the instruction in isolation we run through the rules
3775  // applicable when it follows a prefix instruction.
3776  // NOTE: brk & hlt can be prefixed but require no additional validation.
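  // For example (illustrative SVE sequence):
  //   movprfx z0.d, p0/m, z1.d
  //   add z0.d, p0/m, z0.d, z2.d  // OK: same destination, predicate and size
  //   add z3.d, p0/m, z3.d, z2.d  // rejected: different destination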
3777  if (Prefix.isActive() &&
3778  (Inst.getOpcode() != AArch64::BRK) &&
3779  (Inst.getOpcode() != AArch64::HLT)) {
3780 
3781  // Prefixed instructions must have a destructive operand.
3782  if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3783  AArch64::NotDestructive)
3784  return Error(IDLoc, "instruction is unpredictable when following a"
3785  " movprfx, suggest replacing movprfx with mov");
3786 
3787  // Destination operands must match.
3788  if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3789  return Error(Loc[0], "instruction is unpredictable when following a"
3790  " movprfx writing to a different destination");
3791 
3792  // Destination operand must not be used in any other location.
3793  for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3794  if (Inst.getOperand(i).isReg() &&
3795  (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3796  isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3797  return Error(Loc[0], "instruction is unpredictable when following a"
3798  " movprfx and destination also used as non-destructive"
3799  " source");
3800  }
3801 
3802  auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3803  if (Prefix.isPredicated()) {
3804  int PgIdx = -1;
3805 
3806  // Find the instruction's general predicate.
3807  for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3808  if (Inst.getOperand(i).isReg() &&
3809  PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3810  PgIdx = i;
3811  break;
3812  }
3813 
3814  // Instruction must be predicated if the movprfx is predicated.
3815  if (PgIdx == -1 ||
3816  (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3817  return Error(IDLoc, "instruction is unpredictable when following a"
3818  " predicated movprfx, suggest using unpredicated movprfx");
3819 
3820  // Instruction must use same general predicate as the movprfx.
3821  if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3822  return Error(IDLoc, "instruction is unpredictable when following a"
3823  " predicated movprfx using a different general predicate");
3824 
3825  // Instruction element type must match the movprfx.
3826  if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3827  return Error(IDLoc, "instruction is unpredictable when following a"
3828  " predicated movprfx with a different element size");
3829  }
3830  }
3831 
3832  // Check for indexed addressing modes with the base register being the
3833  // same as a destination/source register or pair load where
3834  // the Rt == Rt2. All of those are undefined behaviour.
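  // e.g. "ldr x0, [x0], #8" (writeback base equals Rt) and
  // "ldp x0, x0, [x1]" (Rt == Rt2) are both rejected here.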
3835  switch (Inst.getOpcode()) {
3836  case AArch64::LDPSWpre:
3837  case AArch64::LDPWpost:
3838  case AArch64::LDPWpre:
3839  case AArch64::LDPXpost:
3840  case AArch64::LDPXpre: {
3841  unsigned Rt = Inst.getOperand(1).getReg();
3842  unsigned Rt2 = Inst.getOperand(2).getReg();
3843  unsigned Rn = Inst.getOperand(3).getReg();
3844  if (RI->isSubRegisterEq(Rn, Rt))
3845  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3846  "is also a destination");
3847  if (RI->isSubRegisterEq(Rn, Rt2))
3848  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3849  "is also a destination");
3850  break;
3851  }
3852  case AArch64::LDPDi:
3853  case AArch64::LDPQi:
3854  case AArch64::LDPSi:
3855  case AArch64::LDPSWi:
3856  case AArch64::LDPWi:
3857  case AArch64::LDPXi: {
3858  unsigned Rt = Inst.getOperand(0).getReg();
3859  unsigned Rt2 = Inst.getOperand(1).getReg();
3860  if (Rt == Rt2)
3861  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3862  break;
3863  }
3864  case AArch64::LDPDpost:
3865  case AArch64::LDPDpre:
3866  case AArch64::LDPQpost:
3867  case AArch64::LDPQpre:
3868  case AArch64::LDPSpost:
3869  case AArch64::LDPSpre:
3870  case AArch64::LDPSWpost: {
3871  unsigned Rt = Inst.getOperand(1).getReg();
3872  unsigned Rt2 = Inst.getOperand(2).getReg();
3873  if (Rt == Rt2)
3874  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3875  break;
3876  }
3877  case AArch64::STPDpost:
3878  case AArch64::STPDpre:
3879  case AArch64::STPQpost:
3880  case AArch64::STPQpre:
3881  case AArch64::STPSpost:
3882  case AArch64::STPSpre:
3883  case AArch64::STPWpost:
3884  case AArch64::STPWpre:
3885  case AArch64::STPXpost:
3886  case AArch64::STPXpre: {
3887  unsigned Rt = Inst.getOperand(1).getReg();
3888  unsigned Rt2 = Inst.getOperand(2).getReg();
3889  unsigned Rn = Inst.getOperand(3).getReg();
3890  if (RI->isSubRegisterEq(Rn, Rt))
3891  return Error(Loc[0], "unpredictable STP instruction, writeback base "
3892  "is also a source");
3893  if (RI->isSubRegisterEq(Rn, Rt2))
3894  return Error(Loc[1], "unpredictable STP instruction, writeback base "
3895  "is also a source");
3896  break;
3897  }
3898  case AArch64::LDRBBpre:
3899  case AArch64::LDRBpre:
3900  case AArch64::LDRHHpre:
3901  case AArch64::LDRHpre:
3902  case AArch64::LDRSBWpre:
3903  case AArch64::LDRSBXpre:
3904  case AArch64::LDRSHWpre:
3905  case AArch64::LDRSHXpre:
3906  case AArch64::LDRSWpre:
3907  case AArch64::LDRWpre:
3908  case AArch64::LDRXpre:
3909  case AArch64::LDRBBpost:
3910  case AArch64::LDRBpost:
3911  case AArch64::LDRHHpost:
3912  case AArch64::LDRHpost:
3913  case AArch64::LDRSBWpost:
3914  case AArch64::LDRSBXpost:
3915  case AArch64::LDRSHWpost:
3916  case AArch64::LDRSHXpost:
3917  case AArch64::LDRSWpost:
3918  case AArch64::LDRWpost:
3919  case AArch64::LDRXpost: {
3920  unsigned Rt = Inst.getOperand(1).getReg();
3921  unsigned Rn = Inst.getOperand(2).getReg();
3922  if (RI->isSubRegisterEq(Rn, Rt))
3923  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3924  "is also a source");
3925  break;
3926  }
3927  case AArch64::STRBBpost:
3928  case AArch64::STRBpost:
3929  case AArch64::STRHHpost:
3930  case AArch64::STRHpost:
3931  case AArch64::STRWpost:
3932  case AArch64::STRXpost:
3933  case AArch64::STRBBpre:
3934  case AArch64::STRBpre:
3935  case AArch64::STRHHpre:
3936  case AArch64::STRHpre:
3937  case AArch64::STRWpre:
3938  case AArch64::STRXpre: {
3939  unsigned Rt = Inst.getOperand(1).getReg();
3940  unsigned Rn = Inst.getOperand(2).getReg();
3941  if (RI->isSubRegisterEq(Rn, Rt))
3942  return Error(Loc[0], "unpredictable STR instruction, writeback base "
3943  "is also a source");
3944  break;
3945  }
3946  case AArch64::STXRB:
3947  case AArch64::STXRH:
3948  case AArch64::STXRW:
3949  case AArch64::STXRX:
3950  case AArch64::STLXRB:
3951  case AArch64::STLXRH:
3952  case AArch64::STLXRW:
3953  case AArch64::STLXRX: {
3954  unsigned Rs = Inst.getOperand(0).getReg();
3955  unsigned Rt = Inst.getOperand(1).getReg();
3956  unsigned Rn = Inst.getOperand(2).getReg();
3957  if (RI->isSubRegisterEq(Rt, Rs) ||
3958  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
3959  return Error(Loc[0],
3960  "unpredictable STXR instruction, status is also a source");
3961  break;
3962  }
3963  case AArch64::STXPW:
3964  case AArch64::STXPX:
3965  case AArch64::STLXPW:
3966  case AArch64::STLXPX: {
3967  unsigned Rs = Inst.getOperand(0).getReg();
3968  unsigned Rt1 = Inst.getOperand(1).getReg();
3969  unsigned Rt2 = Inst.getOperand(2).getReg();
3970  unsigned Rn = Inst.getOperand(3).getReg();
3971  if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
3972  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
3973  return Error(Loc[0],
3974  "unpredictable STXP instruction, status is also a source");
3975  break;
3976  }
3977  }
3978 
3979 
3980  // Now check immediate ranges. Separate from the above as there is overlap
3981  // in the instructions being checked and this keeps the nested conditionals
3982  // to a minimum.
3983  switch (Inst.getOpcode()) {
3984  case AArch64::ADDSWri:
3985  case AArch64::ADDSXri:
3986  case AArch64::ADDWri:
3987  case AArch64::ADDXri:
3988  case AArch64::SUBSWri:
3989  case AArch64::SUBSXri:
3990  case AArch64::SUBWri:
3991  case AArch64::SUBXri: {
3992  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3993  // some slight duplication here.
3994  if (Inst.getOperand(2).isExpr()) {
3995  const MCExpr *Expr = Inst.getOperand(2).getExpr();
3996  AArch64MCExpr::VariantKind ELFRefKind;
3997  MCSymbolRefExpr::VariantKind DarwinRefKind;
3998  int64_t Addend;
3999  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4000 
4001  // Only allow these with ADDXri.
4002  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4003  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4004  Inst.getOpcode() == AArch64::ADDXri)
4005  return false;
4006 
4007  // Only allow these with ADDXri/ADDWri
4008  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4009  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4010  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4011  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4012  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4013  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4014  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4015  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4016  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4017  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4018  (Inst.getOpcode() == AArch64::ADDXri ||
4019  Inst.getOpcode() == AArch64::ADDWri))
4020  return false;
4021 
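  // e.g. "add x0, x1, :lo12:sym" and "add w0, w1, :tprel_lo12:tsym" are
  // accepted above.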
4022  // Otherwise, don't allow symbol refs in the immediate field.
4023  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4024  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4025  // 'cmp w0, borked')
4026  return Error(Loc.back(), "invalid immediate expression");
4027  }
4028  // We don't validate more complex expressions here
4029  }
4030  return false;
4031  }
4032  default:
4033  return false;
4034  }
4035 }
4036 
4037 static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
4038  unsigned VariantID = 0);
4039 
4040 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4041  uint64_t ErrorInfo,
4042  OperandVector &Operands) {
4043  switch (ErrCode) {
4044  case Match_InvalidTiedOperand: {
4045  RegConstraintEqualityTy EqTy =
4046  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4047  .getRegEqualityTy();
4048  switch (EqTy) {
4049  case RegConstraintEqualityTy::EqualsSubReg:
4050  return Error(Loc, "operand must be 64-bit form of destination register");
4051  case RegConstraintEqualityTy::EqualsSuperReg:
4052  return Error(Loc, "operand must be 32-bit form of destination register");
4053  case RegConstraintEqualityTy::EqualsReg:
4054  return Error(Loc, "operand must match destination register");
4055  }
4056  llvm_unreachable("Unknown RegConstraintEqualityTy");
4057  }
4058  case Match_MissingFeature:
4059  return Error(Loc,
4060  "instruction requires a CPU feature not currently enabled");
4061  case Match_InvalidOperand:
4062  return Error(Loc, "invalid operand for instruction");
4063  case Match_InvalidSuffix:
4064  return Error(Loc, "invalid type suffix for instruction");
4065  case Match_InvalidCondCode:
4066  return Error(Loc, "expected AArch64 condition code");
4067  case Match_AddSubRegExtendSmall:
4068  return Error(Loc,
4069  "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
4070  case Match_AddSubRegExtendLarge:
4071  return Error(Loc,
4072  "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4073  case Match_AddSubSecondSource:
4074  return Error(Loc,
4075  "expected compatible register, symbol or integer in range [0, 4095]");
4076  case Match_LogicalSecondSource:
4077  return Error(Loc, "expected compatible register or logical immediate");
4078  case Match_InvalidMovImm32Shift:
4079  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4080  case Match_InvalidMovImm64Shift:
4081  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4082  case Match_AddSubRegShift32:
4083  return Error(Loc,
4084  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4085  case Match_AddSubRegShift64:
4086  return Error(Loc,
4087  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4088  case Match_InvalidFPImm:
4089  return Error(Loc,
4090  "expected compatible register or floating-point constant");
4091  case Match_InvalidMemoryIndexedSImm6:
4092  return Error(Loc, "index must be an integer in range [-32, 31].");
4093  case Match_InvalidMemoryIndexedSImm5:
4094  return Error(Loc, "index must be an integer in range [-16, 15].");
4095  case Match_InvalidMemoryIndexed1SImm4:
4096  return Error(Loc, "index must be an integer in range [-8, 7].");
4097  case Match_InvalidMemoryIndexed2SImm4:
4098  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4099  case Match_InvalidMemoryIndexed3SImm4:
4100  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4101  case Match_InvalidMemoryIndexed4SImm4:
4102  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4103  case Match_InvalidMemoryIndexed16SImm4:
4104  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4105  case Match_InvalidMemoryIndexed1SImm6:
4106  return Error(Loc, "index must be an integer in range [-32, 31].");
4107  case Match_InvalidMemoryIndexedSImm8:
4108  return Error(Loc, "index must be an integer in range [-128, 127].");
4109  case Match_InvalidMemoryIndexedSImm9:
4110  return Error(Loc, "index must be an integer in range [-256, 255].");
4111  case Match_InvalidMemoryIndexed8SImm10:
4112  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4113  case Match_InvalidMemoryIndexed4SImm7:
4114  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4115  case Match_InvalidMemoryIndexed8SImm7:
4116  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4117  case Match_InvalidMemoryIndexed16SImm7:
4118  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4119  case Match_InvalidMemoryIndexed8UImm5:
4120  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4121  case Match_InvalidMemoryIndexed4UImm5:
4122  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4123  case Match_InvalidMemoryIndexed2UImm5:
4124  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4125  case Match_InvalidMemoryIndexed8UImm6:
4126  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4127  case Match_InvalidMemoryIndexed4UImm6:
4128  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4129  case Match_InvalidMemoryIndexed2UImm6:
4130  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4131  case Match_InvalidMemoryIndexed1UImm6:
4132  return Error(Loc, "index must be in range [0, 63].");
4133  case Match_InvalidMemoryWExtend8:
4134  return Error(Loc,
4135  "expected 'uxtw' or 'sxtw' with optional shift of #0");
4136  case Match_InvalidMemoryWExtend16:
4137  return Error(Loc,
4138  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4139  case Match_InvalidMemoryWExtend32:
4140  return Error(Loc,
4141  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4142  case Match_InvalidMemoryWExtend64:
4143  return Error(Loc,
4144  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4145  case Match_InvalidMemoryWExtend128:
4146  return Error(Loc,
4147  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4148  case Match_InvalidMemoryXExtend8:
4149  return Error(Loc,
4150  "expected 'lsl' or 'sxtx' with optional shift of #0");
4151  case Match_InvalidMemoryXExtend16:
4152  return Error(Loc,
4153  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4154  case Match_InvalidMemoryXExtend32:
4155  return Error(Loc,
4156  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4157  case Match_InvalidMemoryXExtend64:
4158  return Error(Loc,
4159  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4160  case Match_InvalidMemoryXExtend128:
4161  return Error(Loc,
4162  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4163  case Match_InvalidMemoryIndexed1:
4164  return Error(Loc, "index must be an integer in range [0, 4095].");
4165  case Match_InvalidMemoryIndexed2:
4166  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4167  case Match_InvalidMemoryIndexed4:
4168  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4169  case Match_InvalidMemoryIndexed8:
4170  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4171  case Match_InvalidMemoryIndexed16:
4172  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4173  case Match_InvalidImm0_1:
4174  return Error(Loc, "immediate must be an integer in range [0, 1].");
4175  case Match_InvalidImm0_7:
4176  return Error(Loc, "immediate must be an integer in range [0, 7].");
4177  case Match_InvalidImm0_15:
4178  return Error(Loc, "immediate must be an integer in range [0, 15].");
4179  case Match_InvalidImm0_31:
4180  return Error(Loc, "immediate must be an integer in range [0, 31].");
4181  case Match_InvalidImm0_63:
4182  return Error(Loc, "immediate must be an integer in range [0, 63].");
4183  case Match_InvalidImm0_127:
4184  return Error(Loc, "immediate must be an integer in range [0, 127].");
4185  case Match_InvalidImm0_255:
4186  return Error(Loc, "immediate must be an integer in range [0, 255].");
4187  case Match_InvalidImm0_65535:
4188  return Error(Loc, "immediate must be an integer in range [0, 65535].");
4189  case Match_InvalidImm1_8:
4190  return Error(Loc, "immediate must be an integer in range [1, 8].");
4191  case Match_InvalidImm1_16:
4192  return Error(Loc, "immediate must be an integer in range [1, 16].");
4193  case Match_InvalidImm1_32:
4194  return Error(Loc, "immediate must be an integer in range [1, 32].");
4195  case Match_InvalidImm1_64:
4196  return Error(Loc, "immediate must be an integer in range [1, 64].");
4197  case Match_InvalidSVEAddSubImm8:
4198  return Error(Loc, "immediate must be an integer in range [0, 255]"
4199  " with a shift amount of 0");
4200  case Match_InvalidSVEAddSubImm16:
4201  case Match_InvalidSVEAddSubImm32:
4202  case Match_InvalidSVEAddSubImm64:
4203  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4204  "multiple of 256 in range [256, 65280]");
4205  case Match_InvalidSVECpyImm8:
4206  return Error(Loc, "immediate must be an integer in range [-128, 255]"
4207  " with a shift amount of 0");
4208  case Match_InvalidSVECpyImm16:
4209  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4210  "multiple of 256 in range [-32768, 65280]");
4211  case Match_InvalidSVECpyImm32:
4212  case Match_InvalidSVECpyImm64:
4213  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4214  "multiple of 256 in range [-32768, 32512]");
4215  case Match_InvalidIndexRange1_1:
4216  return Error(Loc, "expected lane specifier '[1]'");
4217  case Match_InvalidIndexRange0_15:
4218  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4219  case Match_InvalidIndexRange0_7:
4220  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4221  case Match_InvalidIndexRange0_3:
4222  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4223  case Match_InvalidIndexRange0_1:
4224  return Error(Loc, "vector lane must be an integer in range [0, 1].");
4225  case Match_InvalidSVEIndexRange0_63:
4226  return Error(Loc, "vector lane must be an integer in range [0, 63].");
4227  case Match_InvalidSVEIndexRange0_31:
4228  return Error(Loc, "vector lane must be an integer in range [0, 31].");
4229  case Match_InvalidSVEIndexRange0_15:
4230  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4231  case Match_InvalidSVEIndexRange0_7:
4232  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4233  case Match_InvalidSVEIndexRange0_3:
4234  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4235  case Match_InvalidLabel:
4236  return Error(Loc, "expected label or encodable integer pc offset");
4237  case Match_MRS:
4238  return Error(Loc, "expected readable system register");
4239  case Match_MSR:
4240  return Error(Loc, "expected writable system register or pstate");
4241  case Match_InvalidComplexRotationEven:
4242  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4243  case Match_InvalidComplexRotationOdd:
4244  return Error(Loc, "complex rotation must be 90 or 270.");
4245  case Match_MnemonicFail: {
4246  std::string Suggestion = AArch64MnemonicSpellCheck(
4247  ((AArch64Operand &)*Operands[0]).getToken(),
4248  ComputeAvailableFeatures(STI->getFeatureBits()));
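    // Suggestion is either empty or of the form ", did you mean: <mnemonic>?".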
4249  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4250  }
4251  case Match_InvalidGPR64shifted8:
4252  return Error(Loc, "register must be x0..x30 or xzr, without shift");
4253  case Match_InvalidGPR64shifted16:
4254  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4255  case Match_InvalidGPR64shifted32:
4256  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4257  case Match_InvalidGPR64shifted64:
4258  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4259  case Match_InvalidGPR64NoXZRshifted8:
4260  return Error(Loc, "register must be x0..x30 without shift");
4261  case Match_InvalidGPR64NoXZRshifted16:
4262  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4263  case Match_InvalidGPR64NoXZRshifted32:
4264  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4265  case Match_InvalidGPR64NoXZRshifted64:
4266  return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4267  case Match_InvalidZPR32UXTW8:
4268  case Match_InvalidZPR32SXTW8:
4269  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4270  case Match_InvalidZPR32UXTW16:
4271  case Match_InvalidZPR32SXTW16:
4272  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4273  case Match_InvalidZPR32UXTW32:
4274  case Match_InvalidZPR32SXTW32:
4275  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4276  case Match_InvalidZPR32UXTW64:
4277  case Match_InvalidZPR32SXTW64:
4278  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4279  case Match_InvalidZPR64UXTW8:
4280  case Match_InvalidZPR64SXTW8:
4281  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4282  case Match_InvalidZPR64UXTW16:
4283  case Match_InvalidZPR64SXTW16:
4284  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4285  case Match_InvalidZPR64UXTW32:
4286  case Match_InvalidZPR64SXTW32:
4287  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4288  case Match_InvalidZPR64UXTW64:
4289  case Match_InvalidZPR64SXTW64:
4290  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4291  case Match_InvalidZPR32LSL8:
4292  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4293  case Match_InvalidZPR32LSL16:
4294  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4295  case Match_InvalidZPR32LSL32:
4296  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4297  case Match_InvalidZPR32LSL64:
4298  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4299  case Match_InvalidZPR64LSL8:
4300  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4301  case Match_InvalidZPR64LSL16:
4302  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4303  case Match_InvalidZPR64LSL32:
4304  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4305  case Match_InvalidZPR64LSL64:
4306  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4307  case Match_InvalidZPR0:
4308  return Error(Loc, "expected register without element width suffix");
4309  case Match_InvalidZPR8:
4310  case Match_InvalidZPR16:
4311  case Match_InvalidZPR32:
4312  case Match_InvalidZPR64:
4313  case Match_InvalidZPR128:
4314  return Error(Loc, "invalid element width");
4315  case Match_InvalidZPR_3b8:
4316  return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4317  case Match_InvalidZPR_3b16:
4318  return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4319  case Match_InvalidZPR_3b32:
4320  return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4321  case Match_InvalidZPR_4b16:
4322  return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4323  case Match_InvalidZPR_4b32:
4324  return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4325  case Match_InvalidZPR_4b64:
4326  return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4327  case Match_InvalidSVEPattern:
4328  return Error(Loc, "invalid predicate pattern");
4329  case Match_InvalidSVEPredicateAnyReg:
4330  case Match_InvalidSVEPredicateBReg:
4331  case Match_InvalidSVEPredicateHReg:
4332  case Match_InvalidSVEPredicateSReg:
4333  case Match_InvalidSVEPredicateDReg:
4334  return Error(Loc, "invalid predicate register.");
4335  case Match_InvalidSVEPredicate3bAnyReg:
4336  case Match_InvalidSVEPredicate3bBReg:
4337  case Match_InvalidSVEPredicate3bHReg:
4338  case Match_InvalidSVEPredicate3bSReg:
4339  case Match_InvalidSVEPredicate3bDReg:
4340  return Error(Loc, "restricted predicate has range [0, 7].");
4341  case Match_InvalidSVEExactFPImmOperandHalfOne:
4342  return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4343  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4344  return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4345  case Match_InvalidSVEExactFPImmOperandZeroOne:
4346  return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4347  default:
4348  llvm_unreachable("unexpected error code!");
4349  }
4350 }
4351 
4352 static const char *getSubtargetFeatureName(uint64_t Val);
4353 
4354 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4355  OperandVector &Operands,
4356  MCStreamer &Out,
4357  uint64_t &ErrorInfo,
4358  bool MatchingInlineAsm) {
4359  assert(!Operands.empty() && "Unexpected empty operand list!");
4360  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4361  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4362 
4363  StringRef Tok = Op.getToken();
4364  unsigned NumOperands = Operands.size();
4365 
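  // The "lsl <Rd>, <Rn>, #<shift>" alias is rewritten to UBFM below, e.g.
  // "lsl x0, x1, #4" becomes "ubfm x0, x1, #60, #59" ((64 - 4) & 63, 63 - 4).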
4366  if (NumOperands == 4 && Tok == "lsl") {
4367  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4368  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4369  if (Op2.isScalarReg() && Op3.isImm()) {
4370  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4371  if (Op3CE) {
4372  uint64_t Op3Val = Op3CE->getValue();
4373  uint64_t NewOp3Val = 0;
4374  uint64_t NewOp4Val = 0;
4375  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4376  Op2.getReg())) {
4377  NewOp3Val = (32 - Op3Val) & 0x1f;
4378  NewOp4Val = 31 - Op3Val;
4379  } else {
4380  NewOp3Val = (64 - Op3Val) & 0x3f;
4381  NewOp4Val = 63 - Op3Val;
4382  }
4383 
4384  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4385  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4386 
4387  Operands[0] = AArch64Operand::CreateToken(
4388  "ubfm", false, Op.getStartLoc(), getContext());
4389  Operands.push_back(AArch64Operand::CreateImm(
4390  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4391  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4392  Op3.getEndLoc(), getContext());
4393  }
4394  }
4395  } else if (NumOperands == 4 && Tok == "bfc") {
4396  // FIXME: Horrible hack to handle BFC->BFM alias.
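    // "bfc <Rd>, #<lsb>, #<width>" is "bfi <Rd>, <W|X>zr, #<lsb>, #<width>",
    // e.g. "bfc x0, #8, #4" becomes "bfm x0, xzr, #56, #3".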
4397  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4398  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4399  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4400 
4401  if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4402  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4403  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4404 
4405  if (LSBCE && WidthCE) {
4406  uint64_t LSB = LSBCE->getValue();
4407  uint64_t Width = WidthCE->getValue();
4408 
4409  uint64_t RegWidth = 0;
4410  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4411  Op1.getReg()))
4412  RegWidth = 64;
4413  else
4414  RegWidth = 32;
4415 
4416  if (LSB >= RegWidth)
4417  return Error(LSBOp.getStartLoc(),
4418  "expected integer in range [0, 31]");
4419  if (Width < 1 || Width > RegWidth)
4420  return Error(WidthOp.getStartLoc(),
4421  "expected integer in range [1, 32]");
4422 
4423  uint64_t ImmR = 0;
4424  if (RegWidth == 32)
4425  ImmR = (32 - LSB) & 0x1f;
4426  else
4427  ImmR = (64 - LSB) & 0x3f;
4428 
4429  uint64_t ImmS = Width - 1;
4430 
4431  if (ImmR != 0 && ImmS >= ImmR)
4432  return Error(WidthOp.getStartLoc(),
4433  "requested insert overflows register");
4434 
4435  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4436  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4437  Operands[0] = AArch64Operand::CreateToken(
4438  "bfm", false, Op.getStartLoc(), getContext());
4439  Operands[2] = AArch64Operand::CreateReg(
4440  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4441  SMLoc(), SMLoc(), getContext());
4442  Operands[3] = AArch64Operand::CreateImm(
4443  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4444  Operands.emplace_back(
4445  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4446  WidthOp.getEndLoc(), getContext()));
4447  }
4448  }
4449  } else if (NumOperands == 5) {
4450  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4451  // UBFIZ -> UBFM aliases.
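    // e.g. "bfi x0, x1, #8, #4" becomes "bfm x0, x1, #56, #3"
    // (immr = (regwidth - lsb) & (regwidth - 1), imms = width - 1).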
4452  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4453  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4454  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4455  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4456 
4457  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4458  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4459  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4460 
4461  if (Op3CE && Op4CE) {
4462  uint64_t Op3Val = Op3CE->getValue();
4463  uint64_t Op4Val = Op4CE->getValue();
4464 
4465  uint64_t RegWidth = 0;
4466  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4467  Op1.getReg()))
4468  RegWidth = 64;
4469  else
4470  RegWidth = 32;
4471 
4472  if (Op3Val >= RegWidth)
4473  return Error(Op3.getStartLoc(),
4474  "expected integer in range [0, 31]");
4475  if (Op4Val < 1 || Op4Val > RegWidth)
4476  return Error(Op4.getStartLoc(),
4477  "expected integer in range [1, 32]");
4478 
4479  uint64_t NewOp3Val = 0;
4480  if (RegWidth == 32)
4481  NewOp3Val = (32 - Op3Val) & 0x1f;
4482  else
4483  NewOp3Val = (64 - Op3Val) & 0x3f;
4484 
4485  uint64_t NewOp4Val = Op4Val - 1;
4486 
4487  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4488  return Error(Op4.getStartLoc(),
4489  "requested insert overflows register");
4490 
4491  const MCExpr *NewOp3 =
4492  MCConstantExpr::create(NewOp3Val, getContext());
4493  const MCExpr *NewOp4 =
4494  MCConstantExpr::create(NewOp4Val, getContext());
4495  Operands[3] = AArch64Operand::CreateImm(
4496  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4497  Operands[4] = AArch64Operand::CreateImm(
4498  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4499  if (Tok == "bfi")
4500  Operands[0] = AArch64Operand::CreateToken(
4501  "bfm", false, Op.getStartLoc(), getContext());
4502  else if (Tok == "sbfiz")
4503  Operands[0] = AArch64Operand::CreateToken(
4504  "sbfm", false, Op.getStartLoc(), getContext());
4505  else if (Tok == "ubfiz")
4506  Operands[0] = AArch64Operand::CreateToken(
4507  "ubfm", false, Op.getStartLoc(), getContext());
4508  else
4509  llvm_unreachable("No valid mnemonic for alias?");
4510  }
4511  }
4512 
4513  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4514  // UBFX -> UBFM aliases.
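    // e.g. "ubfx x0, x1, #8, #4" becomes "ubfm x0, x1, #8, #11"
    // (immr = lsb, imms = lsb + width - 1).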
4515  } else if (NumOperands == 5 &&
4516  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4517  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4518  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4519  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4520 
4521  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4522  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4523  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4524 
4525  if (Op3CE && Op4CE) {
4526  uint64_t Op3Val = Op3CE->getValue();
4527  uint64_t Op4Val = Op4CE->getValue();
4528 
4529  uint64_t RegWidth = 0;
4530  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4531  Op1.getReg()))
4532  RegWidth = 64;
4533  else
4534  RegWidth = 32;
4535 
4536  if (Op3Val >= RegWidth)
4537  return Error(Op3.getStartLoc(),
4538  "expected integer in range [0, 31]");
4539  if (Op4Val < 1 || Op4Val > RegWidth)
4540  return Error(Op4.getStartLoc(),
4541  "expected integer in range [1, 32]");
4542 
4543  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4544 
4545  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4546  return Error(Op4.getStartLoc(),
4547  "requested extract overflows register");
4548 
4549  const MCExpr *NewOp4 =
4550  MCConstantExpr::create(NewOp4Val, getContext());
4551  Operands[4] = AArch64Operand::CreateImm(
4552  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4553  if (Tok == "bfxil")
4554  Operands[0] = AArch64Operand::CreateToken(
4555  "bfm", false, Op.getStartLoc(), getContext());
4556  else if (Tok == "sbfx")
4557  Operands[0] = AArch64Operand::CreateToken(
4558  "sbfm", false, Op.getStartLoc(), getContext());
4559  else if (Tok == "ubfx")
4560  Operands[0] = AArch64Operand::CreateToken(
4561  "ubfm", false, Op.getStartLoc(), getContext());
4562  else
4563  llvm_unreachable("No valid mnemonic for alias?");
4564  }
4565  }
4566  }
4567  }
4568 
4569  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4570  // instruction for FP registers correctly in some rare circumstances. Convert
4571  // it to a safe instruction and warn (because silently changing someone's
4572  // assembly is rude).
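  // e.g. "movi.2d v0, #0" (or the long form "movi v0.2d, #0") is emitted as
  // the equivalent "movi.16b v0, #0" on the affected CPUs.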
4573  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4574  NumOperands == 4 && Tok == "movi") {
4575  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4576  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4577  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4578  if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4579  (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4580  StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4581  if (Suffix.lower() == ".2d" &&
4582  cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4583  Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4584  " correctly on this CPU, converting to equivalent movi.16b");
4585  // Switch the suffix to .16b.
4586  unsigned Idx = Op1.isToken() ? 1 : 2;
4587  Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4588  getContext());
4589  }
4590  }
4591  }
4592 
4593  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4594  // InstAlias can't quite handle this since the reg classes aren't
4595  // subclasses.
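  // e.g. for "sxtw x0, w1" the source w1 is rewritten to x1 below so it can
  // match the SBFM-based alias, which expects two 64-bit registers.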
4596  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4597  // The source register can be Wn here, but the matcher expects a
4598  // GPR64. Twiddle it here if necessary.
4599  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4600  if (Op.isScalarReg()) {
4601  unsigned Reg = getXRegFromWReg(Op.getReg());
4602  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4603  Op.getStartLoc(), Op.getEndLoc(),
4604  getContext());
4605  }
4606  }
4607  // FIXME: Likewise for sxt[bh] with a Xd dst operand
4608  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4609  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4610  if (Op.isScalarReg() &&
4611  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4612  Op.getReg())) {
4613  // The source register can be Wn here, but the matcher expects a
4614  // GPR64. Twiddle it here if necessary.
4615  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4616  if (Op.isScalarReg()) {
4617  unsigned Reg = getXRegFromWReg(Op.getReg());
4618  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4619  Op.getStartLoc(),
4620  Op.getEndLoc(), getContext());
4621  }
4622  }
4623  }
4624  // FIXME: Likewise for uxt[bh] with a Xd dst operand
4625  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4626  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4627  if (Op.isScalarReg() &&
4628  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4629  Op.getReg())) {
4630  // The source register can be Wn here, but the matcher expects a
4631  // GPR32. Twiddle it here if necessary.
4632  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4633  if (Op.isScalarReg()) {
4634  unsigned Reg = getWRegFromXReg(Op.getReg());
4635  Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4636  Op.getStartLoc(),
4637  Op.getEndLoc(), getContext());
4638  }
4639  }
4640  }
4641 
4642  MCInst Inst;
4643  // First try to match against the secondary set of tables containing the
4644  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4645  unsigned MatchResult =
4646  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4647 
4648  // If that fails, try against the alternate table containing long-form NEON:
4649  // "fadd v0.2s, v1.2s, v2.2s"
4650  if (MatchResult != Match_Success) {
4651  // But first, save the short-form match result: we can use it in case the
4652  // long-form match also fails.
4653  auto ShortFormNEONErrorInfo = ErrorInfo;
4654  auto ShortFormNEONMatchResult = MatchResult;
4655 
4656  MatchResult =
4657  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4658 
4659  // Now, both matches failed, and the long-form match failed on the mnemonic
4660  // suffix token operand. The short-form match failure is probably more
4661  // relevant: use it instead.
4662  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4663  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4664  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4665  MatchResult = ShortFormNEONMatchResult;
4666  ErrorInfo = ShortFormNEONErrorInfo;
4667  }
4668  }
4669 
4670  switch (MatchResult) {
4671  case Match_Success: {
4672  // Perform range checking and other semantic validations
4673  SmallVector<SMLoc, 8> OperandLocs;
4674  NumOperands = Operands.size();
4675  for (unsigned i = 1; i < NumOperands; ++i)
4676  OperandLocs.push_back(Operands[i]->getStartLoc());
4677  if (validateInstruction(Inst, IDLoc, OperandLocs))
4678  return true;
4679 
4680  Inst.setLoc(IDLoc);
4681  Out.EmitInstruction(Inst, getSTI());
4682  return false;
4683  }
4684  case Match_MissingFeature: {
4685  assert(ErrorInfo && "Unknown missing feature!");
4686  // Special case the error message for the very common case where only
4687  // a single subtarget feature is missing (neon, e.g.).
4688  std::string Msg = "instruction requires:";
4689  uint64_t Mask = 1;
4690  for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4691  if (ErrorInfo & Mask) {
4692  Msg += " ";
4693  Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4694  }
4695  Mask <<= 1;
4696  }
4697  return Error(IDLoc, Msg);
4698  }
4699  case Match_MnemonicFail:
4700  return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4701  case Match_InvalidOperand: {
4702  SMLoc ErrorLoc = IDLoc;
4703 
4704  if (ErrorInfo != ~0ULL) {
4705  if (ErrorInfo >= Operands.size())
4706  return Error(IDLoc, "too few operands for instruction",
4707  SMRange(IDLoc, getTok().getLoc()));
4708 
4709  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4710  if (ErrorLoc == SMLoc())
4711  ErrorLoc = IDLoc;
4712  }
4713  // If the match failed on a suffix token operand, tweak the diagnostic
4714  // accordingly.
4715  if (ErrorInfo != ~0ULL && ((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4716  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4717  MatchResult = Match_InvalidSuffix;
4718 
4719  return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4720  }
4721  case Match_InvalidTiedOperand:
4722  case Match_InvalidMemoryIndexed1:
4723  case Match_InvalidMemoryIndexed2:
4724  case Match_InvalidMemoryIndexed4:
4725  case Match_InvalidMemoryIndexed8:
4726  case Match_InvalidMemoryIndexed16:
4727  case Match_InvalidCondCode:
4728  case Match_AddSubRegExtendSmall:
4729  case Match_AddSubRegExtendLarge:
4730  case Match_AddSubSecondSource:
4731  case Match_LogicalSecondSource:
4732  case Match_AddSubRegShift32:
4733  case Match_AddSubRegShift64:
4734  case Match_InvalidMovImm32Shift:
4735  case Match_InvalidMovImm64Shift:
4736  case Match_InvalidFPImm:
4737  case Match_InvalidMemoryWExtend8:
4738  case Match_InvalidMemoryWExtend16:
4739  case Match_InvalidMemoryWExtend32:
4740  case Match_InvalidMemoryWExtend64:
4741  case Match_InvalidMemoryWExtend128:
4742  case Match_InvalidMemoryXExtend8:
4743  case Match_InvalidMemoryXExtend16:
4744  case Match_InvalidMemoryXExtend32:
4745  case Match_InvalidMemoryXExtend64:
4746  case Match_InvalidMemoryXExtend128:
4747  case Match_InvalidMemoryIndexed1SImm4:
4748  case Match_InvalidMemoryIndexed2SImm4:
4749  case Match_InvalidMemoryIndexed3SImm4:
4750  case Match_InvalidMemoryIndexed4SImm4:
4751  case Match_InvalidMemoryIndexed1SImm6:
4752  case Match_InvalidMemoryIndexed16SImm4:
4753  case Match_InvalidMemoryIndexed4SImm7:
4754  case Match_InvalidMemoryIndexed8SImm7:
4755  case Match_InvalidMemoryIndexed16SImm7:
4756  case Match_InvalidMemoryIndexed8UImm5:
4757  case Match_InvalidMemoryIndexed4UImm5:
4758  case Match_InvalidMemoryIndexed2UImm5:
4759  case Match_InvalidMemoryIndexed1UImm6:
4760  case Match_InvalidMemoryIndexed2UImm6:
4761  case Match_InvalidMemoryIndexed4UImm6:
4762  case Match_InvalidMemoryIndexed8UImm6:
4763  case Match_InvalidMemoryIndexedSImm6:
4764  case Match_InvalidMemoryIndexedSImm5:
4765  case Match_InvalidMemoryIndexedSImm8:
4766  case Match_InvalidMemoryIndexedSImm9:
4767  case Match_InvalidMemoryIndexed8SImm10:
4768  case Match_InvalidImm0_1:
4769  case Match_InvalidImm0_7:
4770  case Match_InvalidImm0_15:
4771  case Match_InvalidImm0_31:
4772  case Match_InvalidImm0_63:
4773  case Match_InvalidImm0_127:
4774  case Match_InvalidImm0_255:
4775  case Match_InvalidImm0_65535:
4776  case Match_InvalidImm1_8:
4777  case Match_InvalidImm1_16:
4778  case Match_InvalidImm1_32:
4779  case Match_InvalidImm1_64:
4780  case Match_InvalidSVEAddSubImm8:
4781  case Match_InvalidSVEAddSubImm16:
4782  case Match_InvalidSVEAddSubImm32:
4783  case Match_InvalidSVEAddSubImm64:
4784  case Match_InvalidSVECpyImm8:
4785  case Match_InvalidSVECpyImm16:
4786  case Match_InvalidSVECpyImm32:
4787  case Match_InvalidSVECpyImm64:
4788  case Match_InvalidIndexRange1_1:
4789  case Match_InvalidIndexRange0_15:
4790  case Match_InvalidIndexRange0_7:
4791  case Match_InvalidIndexRange0_3:
4792  case Match_InvalidIndexRange0_1:
4793  case Match_InvalidSVEIndexRange0_63:
4794  case Match_InvalidSVEIndexRange0_31:
4795  case Match_InvalidSVEIndexRange0_15:
4796  case Match_InvalidSVEIndexRange0_7:
4797  case Match_InvalidSVEIndexRange0_3:
4798  case Match_InvalidLabel:
4799  case Match_InvalidComplexRotationEven:
4800  case Match_InvalidComplexRotationOdd:
4801  case Match_InvalidGPR64shifted8:
4802  case Match_InvalidGPR64shifted16:
4803  case Match_InvalidGPR64shifted32:
4804  case Match_InvalidGPR64shifted64:
4805  case Match_InvalidGPR64NoXZRshifted8:
4806  case Match_InvalidGPR64NoXZRshifted16:
4807  case Match_InvalidGPR64NoXZRshifted32:
4808  case Match_InvalidGPR64NoXZRshifted64:
4809  case Match_InvalidZPR32UXTW8:
4810  case Match_InvalidZPR32UXTW16:
4811  case Match_InvalidZPR32UXTW32:
4812  case Match_InvalidZPR32UXTW64:
4813  case Match_InvalidZPR32SXTW8:
4814  case Match_InvalidZPR32SXTW16:
4815  case Match_InvalidZPR32SXTW32:
4816  case Match_InvalidZPR32SXTW64:
4817  case Match_InvalidZPR64UXTW8:
4818  case Match_InvalidZPR64SXTW8:
4819  case Match_InvalidZPR64UXTW16:
4820  case Match_InvalidZPR64SXTW16:
4821  case Match_InvalidZPR64UXTW32:
4822  case Match_InvalidZPR64SXTW32:
4823  case Match_InvalidZPR64UXTW64:
4824  case Match_InvalidZPR64SXTW64:
4825  case Match_InvalidZPR32LSL8:
4826  case Match_InvalidZPR32LSL16:
4827  case Match_InvalidZPR32LSL32:
4828  case Match_InvalidZPR32LSL64:
4829  case Match_InvalidZPR64LSL8:
4830  case Match_InvalidZPR64LSL16:
4831  case Match_InvalidZPR64LSL32:
4832  case Match_InvalidZPR64LSL64:
4833  case Match_InvalidZPR0:
4834  case Match_InvalidZPR8:
4835  case Match_InvalidZPR16:
4836  case Match_InvalidZPR32:
4837  case Match_InvalidZPR64:
4838  case Match_InvalidZPR128:
4839  case Match_InvalidZPR_3b8:
4840  case Match_InvalidZPR_3b16:
4841  case Match_InvalidZPR_3b32:
4842  case Match_InvalidZPR_4b16:
4843  case Match_InvalidZPR_4b32:
4844  case Match_InvalidZPR_4b64:
4845  case Match_InvalidSVEPredicateAnyReg:
4846  case Match_InvalidSVEPattern:
4847  case Match_InvalidSVEPredicateBReg:
4848  case Match_InvalidSVEPredicateHReg:
4849  case Match_InvalidSVEPredicateSReg:
4850  case Match_InvalidSVEPredicateDReg:
4851  case Match_InvalidSVEPredicate3bAnyReg:
4852  case Match_InvalidSVEPredicate3bBReg:
4853  case Match_InvalidSVEPredicate3bHReg:
4854  case Match_InvalidSVEPredicate3bSReg:
4855  case Match_InvalidSVEPredicate3bDReg:
4856  case Match_InvalidSVEExactFPImmOperandHalfOne:
4857  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4858  case Match_InvalidSVEExactFPImmOperandZeroOne:
4859  case Match_MSR:
4860  case Match_MRS: {
4861  if (ErrorInfo >= Operands.size())
4862  return Error(IDLoc, "too few operands for instruction",
  SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4863  // Any time we get here, there's nothing fancy to do. Just get the
4864  // operand SMLoc and display the diagnostic.
4865  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4866  if (ErrorLoc == SMLoc())
4867  ErrorLoc = IDLoc;
4868  return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4869  }
4870  }
4871 
4872  llvm_unreachable("Implement any new match types added!");
4873 }
4874 
4875 /// ParseDirective parses the AArch64-specific directives.
4876 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4877  const MCObjectFileInfo::Environment Format =
4878  getContext().getObjectFileInfo()->getObjectFileType();
4879  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4880 
4881  StringRef IDVal = DirectiveID.getIdentifier();
4882  SMLoc Loc = DirectiveID.getLoc();
4883  if (IDVal == ".arch")
4884  parseDirectiveArch(Loc);
4885  else if (IDVal == ".cpu")
4886  parseDirectiveCPU(Loc);
4887  else if (IDVal == ".tlsdesccall")
4888  parseDirectiveTLSDescCall(Loc);
4889  else if (IDVal == ".ltorg" || IDVal == ".pool")
4890  parseDirectiveLtorg(Loc);
4891  else if (IDVal == ".unreq")
4892  parseDirectiveUnreq(Loc);
4893  else if (IDVal == ".inst")
4894  parseDirectiveInst(Loc);
4895  else if (IsMachO) {
4896  if (IDVal == MCLOHDirectiveName())
4897  parseDirectiveLOH(IDVal, Loc);
4898  else
4899  return true;
4900  } else
4901  return true;
4902  return false;
4903 }
4904 
4905 static const struct {
4906  const char *Name;
4907  const FeatureBitset Features;
4908 } ExtensionMap[] = {
4909  { "crc", {AArch64::FeatureCRC} },
4910  { "sm4", {AArch64::FeatureSM4} },
4911  { "sha3", {AArch64::FeatureSHA3} },
4912  { "sha2", {AArch64::FeatureSHA2} },
4913  { "aes", {AArch64::FeatureAES} },
4914  { "crypto", {AArch64::FeatureCrypto} },
4915  { "fp", {AArch64::FeatureFPARMv8} },
4916  { "simd", {AArch64::FeatureNEON} },
4917  { "ras", {AArch64::FeatureRAS} },
4918  { "lse", {AArch64::FeatureLSE} },
4919 
4920  // FIXME: Unsupported extensions
4921  { "pan", {} },
4922  { "lor", {} },
4923  { "rdma", {} },
4924  { "profile", {} },
4925 };
4926 
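// "crypto" is an alias whose expansion depends on the architecture. A sketch
// of the mapping implemented below (v8.4-A adds SM4/SHA3 on top of the
// traditional SHA2/AES pair):
//   +crypto on v8.1-A..v8.3-A (and generic)  ->  +sha2 +aes
//   +crypto on v8.4-A                        ->  +sm4 +sha3 +sha2 +aes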
4927 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
4928  SmallVector<StringRef, 4> &RequestedExtensions) {
4929  const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
4930  const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
4935 
4936  if (!NoCrypto && Crypto) {
4937  switch (ArchKind) {
4938  default:
4939  // Map 'generic' (and others) to sha2 and aes, because
4940  // that was the traditional meaning of crypto.
4941  case AArch64::ArchKind::ARMV8_1A:
4942  case AArch64::ArchKind::ARMV8_2A:
4943  case AArch64::ArchKind::ARMV8_3A:
4944  RequestedExtensions.push_back("sha2");
4945  RequestedExtensions.push_back("aes");
4946  break;
4947  case AArch64::ArchKind::ARMV8_4A:
4948  RequestedExtensions.push_back("sm4");
4949  RequestedExtensions.push_back("sha3");
4950  RequestedExtensions.push_back("sha2");
4951  RequestedExtensions.push_back("aes");
4952  break;
4953  }
4954  } else if (NoCrypto) {
4955  switch (ArchKind) {
4956  default:
4957  // Map 'generic' (and others) to sha2 and aes, because
4958  // that was the traditional meaning of crypto.
4959  case AArch64::ArchKind::ARMV8_1A:
4960  case AArch64::ArchKind::ARMV8_2A:
4961  case AArch64::ArchKind::ARMV8_3A:
4962  RequestedExtensions.push_back("nosha2");
4963  RequestedExtensions.push_back("noaes");
4964  break;
4965  case AArch64::ArchKind::ARMV8_4A:
4966  RequestedExtensions.push_back("nosm4");
4967  RequestedExtensions.push_back("nosha3");
4968  RequestedExtensions.push_back("nosha2");
4969  RequestedExtensions.push_back("noaes");
4970  break;
4971  }
4972  }
4973 }
4974 
4975 /// parseDirectiveArch
4976 /// ::= .arch token
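/// For example (illustrative input, not from this file):
///   .arch armv8.2-a+crypto     // switch to ARMv8.2-A and enable crypto
///   .arch armv8-a+nofp         // switch to ARMv8-A and disable FP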
4977 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4978  SMLoc ArchLoc = getLoc();
4979 
4980  StringRef Arch, ExtensionString;
4981  std::tie(Arch, ExtensionString) =
4982  getParser().parseStringToEndOfStatement().trim().split('+');
4983 
4984  AArch64::ArchKind ID = AArch64::parseArch(Arch);
4985  if (ID == AArch64::ArchKind::INVALID)
4986  return Error(ArchLoc, "unknown arch name");
4987 
4988  if (parseToken(AsmToken::EndOfStatement))
4989  return true;
4990 
4991  // Get the architecture and extension features.
4992  std::vector<StringRef> AArch64Features;
4993  AArch64::getArchFeatures(ID, AArch64Features);
4994  AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
4995  AArch64Features);
4996 
4997  MCSubtargetInfo &STI = copySTI();
4998  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4999  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5000 
5001  SmallVector<StringRef, 4> RequestedExtensions;
5002  if (!ExtensionString.empty())
5003  ExtensionString.split(RequestedExtensions, '+');
5004 
5005  ExpandCryptoAEK(ID, RequestedExtensions);
5006 
5007  FeatureBitset Features = STI.getFeatureBits();
5008  for (auto Name : RequestedExtensions) {
5009  bool EnableFeature = true;
5010 
5011  if (Name.startswith_lower("no")) {
5012  EnableFeature = false;
5013  Name = Name.substr(2);
5014  }
5015 
5016  for (const auto &Extension : ExtensionMap) {
5017  if (Extension.Name != Name)
5018  continue;
5019 
5020  if (Extension.Features.none())
5021  report_fatal_error("unsupported architectural extension: " + Name);
5022 
5023  FeatureBitset ToggleFeatures = EnableFeature
5024  ? (~Features & Extension.Features)
5025  : ( Features & Extension.Features);
5026  uint64_t Features =
5027  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5028  setAvailableFeatures(Features);
5029  break;
5030  }
5031  }
5032  return false;
5033 }
5034 
5035 static SMLoc incrementLoc(SMLoc L, int Offset) {
5036  return SMLoc::getFromPointer(L.getPointer() + Offset);
5037 }
5038 
5039 /// parseDirectiveCPU
5040 /// ::= .cpu id
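/// For example (illustrative):
///   .cpu cortex-a57+nocrc      // select Cortex-A57 and disable CRC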
5041 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5042  SMLoc CurLoc = getLoc();
5043 
5044  StringRef CPU, ExtensionString;
5045  std::tie(CPU, ExtensionString) =
5046  getParser().parseStringToEndOfStatement().trim().split('+');
5047 
5048  if (parseToken(AsmToken::EndOfStatement))
5049  return true;
5050 
5051  SmallVector<StringRef, 4> RequestedExtensions;
5052  if (!ExtensionString.empty())
5053  ExtensionString.split(RequestedExtensions, '+');
5054 
5055  // FIXME: This is using TableGen data, but should be moved to
5056  // ARMTargetParser once that is TableGen'ed.
5057  if (!getSTI().isCPUStringValid(CPU)) {
5058  Error(CurLoc, "unknown CPU name");
5059  return false;
5060  }
5061 
5062  MCSubtargetInfo &STI = copySTI();
5063  STI.setDefaultFeatures(CPU, "");
5064  CurLoc = incrementLoc(CurLoc, CPU.size());
5065 
5066  ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5067 
5068  FeatureBitset Features = STI.getFeatureBits();
5069  for (auto Name : RequestedExtensions) {
5070  // Advance source location past '+'.
5071  CurLoc = incrementLoc(CurLoc, 1);
5072 
5073  bool EnableFeature = true;
5074 
5075  if (Name.startswith_lower("no")) {
5076  EnableFeature = false;
5077  Name = Name.substr(2);
5078  }
5079 
5080  bool FoundExtension = false;
5081  for (const auto &Extension : ExtensionMap) {
5082  if (Extension.Name != Name)
5083  continue;
5084 
5085  if (Extension.Features.none())
5086  report_fatal_error("unsupported architectural extension: " + Name);
5087 
5088  FeatureBitset ToggleFeatures = EnableFeature
5089  ? (~Features & Extension.Features)
5090  : ( Features & Extension.Features);
5091  uint64_t Features =
5092  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5093  setAvailableFeatures(Features);
5094  FoundExtension = true;
5095 
5096  break;
5097  }
5098 
5099  if (!FoundExtension)
5100  Error(CurLoc, "unsupported architectural extension");
5101 
5102  CurLoc = incrementLoc(CurLoc, Name.size());
5103  }
5104  return false;
5105 }
5106 
5107 /// parseDirectiveInst
5108 /// ::= .inst opcode [, ...]
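/// For example (illustrative):
///   .inst 0xd503201f               // one word: the A64 NOP encoding
///   .inst 0xd503201f, 0xd65f03c0   // several words: NOP then RET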
5109 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5110  if (getLexer().is(AsmToken::EndOfStatement))
5111  return Error(Loc, "expected expression following '.inst' directive");
5112 
5113  auto parseOp = [&]() -> bool {
5114  SMLoc L = getLoc();
5115  const MCExpr *Expr;
5116  if (check(getParser().parseExpression(Expr), L, "expected expression"))
5117  return true;
5118  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5119  if (check(!Value, L, "expected constant expression"))
5120  return true;
5121  getTargetStreamer().emitInst(Value->getValue());
5122  return false;
5123  };
5124 
5125  if (parseMany(parseOp))
5126  return addErrorSuffix(" in '.inst' directive");
5127  return false;
5128 }
5129 
5130 // parseDirectiveTLSDescCall:
5131 // ::= .tlsdesccall symbol
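// For example (illustrative): ".tlsdesccall var" emits no bytes itself; it
// marks the following blr with a TLSDESC_CALL relocation so the linker can
// relax the TLS descriptor sequence.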
5132 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5133  StringRef Name;
5134  if (check(getParser().parseIdentifier(Name), L,
5135  "expected symbol after directive") ||
5136  parseToken(AsmToken::EndOfStatement))
5137  return true;
5138 
5139  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5140  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5141  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5142 
5143  MCInst Inst;
5144  Inst.setOpcode(AArch64::TLSDESCCALL);
5145  Inst.addOperand(MCOperand::createExpr(Expr));
5146 
5147  getParser().getStreamer().EmitInstruction(Inst, getSTI());
5148  return false;
5149 }
5150 
5151 /// ::= .loh <lohName | lohId> label1, ..., labelN
5152 /// The number of arguments depends on the loh identifier.
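/// For example (illustrative; AdrpAdd expects exactly two labels):
///   .loh AdrpAdd Llabel1, Llabel2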
5153 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5154  MCLOHType Kind;
5155  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5156  if (getParser().getTok().isNot(AsmToken::Integer))
5157  return TokError("expected an identifier or a number in directive");
5158  // We got a numeric value for the identifier.
5159  // Check that it is valid.
5160  int64_t Id = getParser().getTok().getIntVal();
5161  if (Id < 0 || !isValidMCLOHType(Id))
5162  return TokError("invalid numeric identifier in directive");
5163  Kind = (MCLOHType)Id;
5164  } else {
5165  StringRef Name = getTok().getIdentifier();
5166  // We parsed an identifier.
5167  // Check that it is a recognized one.
5168  int Id = MCLOHNameToId(Name);
5169 
5170  if (Id == -1)
5171  return TokError("invalid identifier in directive");
5172  Kind = (MCLOHType)Id;
5173  }
5174  // Consume the identifier.
5175  Lex();
5176  // Get the number of arguments of this LOH.
5177  int NbArgs = MCLOHIdToNbArgs(Kind);
5178 
5179  assert(NbArgs != -1 && "Invalid number of arguments");
5180 
5181  SmallVector<MCSymbol *, 3> Args;
5182  for (int Idx = 0; Idx < NbArgs; ++Idx) {
5183  StringRef Name;
5184  if (getParser().parseIdentifier(Name))
5185  return TokError("expected identifier in directive");
5186  Args.push_back(getContext().getOrCreateSymbol(Name));
5187 
5188  if (Idx + 1 == NbArgs)
5189  break;
5190  if (parseToken(AsmToken::Comma,
5191  "unexpected token in '" + Twine(IDVal) + "' directive"))
5192  return true;
5193  }
5194  if (parseToken(AsmToken::EndOfStatement,
5195  "unexpected token in '" + Twine(IDVal) + "' directive"))
5196  return true;
5197 
5198  getStreamer().EmitLOHDirective(Kind, Args);
5199  return false;
5200 }
5201 
5202 /// parseDirectiveLtorg
5203 /// ::= .ltorg | .pool
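/// For example (illustrative): after "ldr x0, =0x1234567890", a ".ltorg"
/// forces the pending literal-pool entry to be emitted at that point.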
5204 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5205  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5206  return true;
5207  getTargetStreamer().emitCurrentConstantPool();
5208  return false;
5209 }
5210 
5211 /// parseDirectiveReq
5212 /// ::= name .req registername
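/// For example (illustrative):
///   foo .req x4                // 'foo' becomes an alias for x4
///   pn  .req p0                // aliases work for SVE registers too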
5213 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5214  MCAsmParser &Parser = getParser();
5215  Parser.Lex(); // Eat the '.req' token.
5216  SMLoc SRegLoc = getLoc();
5217  RegKind RegisterKind = RegKind::Scalar;
5218  unsigned RegNum;
5219  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5220 
5221  if (ParseRes != MatchOperand_Success) {
5222  StringRef Kind;
5223  RegisterKind = RegKind::NeonVector;
5224  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5225 
5226  if (ParseRes == MatchOperand_ParseFail)
5227  return true;
5228 
5229  if (ParseRes == MatchOperand_Success && !Kind.empty())
5230  return Error(SRegLoc, "vector register without type specifier expected");
5231  }
5232 
5233  if (ParseRes != MatchOperand_Success) {
5234  StringRef Kind;
5235  RegisterKind = RegKind::SVEDataVector;
5236  ParseRes =
5237  tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5238 
5239  if (ParseRes == MatchOperand_ParseFail)
5240  return true;
5241 
5242  if (ParseRes == MatchOperand_Success && !Kind.empty())
5243  return Error(SRegLoc,
5244  "sve vector register without type specifier expected");
5245  }
5246 
5247  if (ParseRes != MatchOperand_Success) {
5248  StringRef Kind;
5249  RegisterKind = RegKind::SVEPredicateVector;
5250  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5251 
5252  if (ParseRes == MatchOperand_ParseFail)
5253  return true;
5254 
5255  if (ParseRes == MatchOperand_Success && !Kind.empty())
5256  return Error(SRegLoc,
5257  "sve predicate register without type specifier expected");
5258  }
5259 
5260  if (ParseRes != MatchOperand_Success)
5261  return Error(SRegLoc, "register name or alias expected");
5262 
5263  // Shouldn't be anything else.
5264  if (parseToken(AsmToken::EndOfStatement,
5265  "unexpected input in .req directive"))
5266  return true;
5267 
5268  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5269  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5270  Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5271 
5272  return false;
5273 }
5274 
5275 /// parseDirectiveUnreq
5276 /// ::= .unreq registername
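/// For example (illustrative): ".unreq foo" removes an alias previously
/// created with "foo .req x4".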
5277 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5278  MCAsmParser &Parser = getParser();
5279  if (getTok().isNot(AsmToken::Identifier))
5280  return TokError("unexpected input in .unreq directive.");
5281  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5282  Parser.Lex(); // Eat the identifier.
5283  if (parseToken(AsmToken::EndOfStatement))
5284  return addErrorSuffix(" in '.unreq' directive");
5285  return false;
5286 }
5287 
5288 bool
5289 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5290  AArch64MCExpr::VariantKind &ELFRefKind,
5291  MCSymbolRefExpr::VariantKind &DarwinRefKind,
5292  int64_t &Addend) {
5293  ELFRefKind = AArch64MCExpr::VK_INVALID;
5294  DarwinRefKind = MCSymbolRefExpr::VK_None;
5295  Addend = 0;
5296 
5297  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5298  ELFRefKind = AE->getKind();
5299  Expr = AE->getSubExpr();
5300  }
5301 
5302  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5303  if (SE) {
5304  // It's a simple symbol reference with no addend.
5305  DarwinRefKind = SE->getKind();
5306  return true;
5307  }
5308 
5309  // Check that it looks like a symbol + an addend
5310  MCValue Res;
5311  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
5312  if (!Relocatable || !Res.getSymA() || Res.getSymB())
5313  return false;
5314 
5315  DarwinRefKind = Res.getSymA()->getKind();
5316  Addend = Res.getConstant();
5317 
5318  // It's some symbol reference + a constant addend, but it really
5319  // shouldn't use both Darwin and ELF syntax at the same time.
5320  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5321  DarwinRefKind == MCSymbolRefExpr::VK_None;
5322 }
5323 
5324 /// Force static initialization.
5325 extern "C" void LLVMInitializeAArch64AsmParser() {
5326  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5327  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5328  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5329 }
5330 
5331 #define GET_REGISTER_MATCHER
5332 #define GET_SUBTARGET_FEATURE_NAME
5333 #define GET_MATCHER_IMPLEMENTATION
5334 #define GET_MNEMONIC_SPELL_CHECKER
5335 #include "AArch64GenAsmMatcher.inc"
5336 
5337 // Define this matcher function after the auto-generated include so we
5338 // have the match class enum definitions.
5339 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5340  unsigned Kind) {
5341  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5342  // If the kind is a token for a literal immediate, check if our asm
5343  // operand matches. This is for InstAliases which have a fixed-value
5344  // immediate in the syntax.
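 // For example (illustrative): an InstAlias whose asm string contains the
 // literal token "#0" produces the match class MCK__35_0 below ('#' is
 // ASCII 35); this hook then accepts only an immediate operand equal to 0.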
5345  int64_t ExpectedVal;
5346  switch (Kind) {
5347  default:
5348  return Match_InvalidOperand;
5349  case MCK__35_0:
5350  ExpectedVal = 0;
5351  break;
5352  case MCK__35_1:
5353  ExpectedVal = 1;
5354  break;
5355  case MCK__35_12:
5356  ExpectedVal = 12;
5357  break;
5358  case MCK__35_16:
5359  ExpectedVal = 16;
5360  break;
5361  case MCK__35_2:
5362  ExpectedVal = 2;
5363  break;
5364  case MCK__35_24:
5365  ExpectedVal = 24;
5366  break;
5367  case MCK__35_3:
5368  ExpectedVal = 3;
5369  break;
5370  case MCK__35_32:
5371  ExpectedVal = 32;
5372  break;
5373  case MCK__35_4:
5374  ExpectedVal = 4;
5375  break;
5376  case MCK__35_48:
5377  ExpectedVal = 48;
5378  break;
5379  case MCK__35_6:
5380  ExpectedVal = 6;
5381  break;
5382  case MCK__35_64:
5383  ExpectedVal = 64;
5384  break;
5385  case MCK__35_8:
5386  ExpectedVal = 8;
5387  break;
5388  }
5389  if (!Op.isImm())
5390  return Match_InvalidOperand;
5391  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5392  if (!CE)
5393  return Match_InvalidOperand;
5394  if (CE->getValue() == ExpectedVal)
5395  return Match_Success;
5396  return Match_InvalidOperand;
5397 }
5398 
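// Parse a GPR sequence pair operand, e.g. the "x0, x1" pairs in an
// instruction such as "casp x0, x1, x2, x3, [x4]" (illustrative): the first
// register must have an even encoding, and its partner must be the next
// consecutive register of the same size.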
5399 OperandMatchResultTy
5400 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5401 
5402  SMLoc S = getLoc();
5403 
5404  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5405  Error(S, "expected register");
5406  return MatchOperand_ParseFail;
5407  }
5408 
5409  unsigned FirstReg;
5410  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5411  if (Res != MatchOperand_Success)
5412  return MatchOperand_ParseFail;
5413 
5414  const MCRegisterClass &WRegClass =
5415  AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5416  const MCRegisterClass &XRegClass =
5417  AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5418 
5419  bool isXReg = XRegClass.contains(FirstReg),
5420  isWReg = WRegClass.contains(FirstReg);
5421  if (!isXReg && !isWReg) {
5422  Error(S, "expected first even register of a "
5423  "consecutive same-size even/odd register pair");
5424  return MatchOperand_ParseFail;
5425  }
5426 
5427  const MCRegisterInfo *RI = getContext().getRegisterInfo();
5428  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5429 
5430  if (FirstEncoding & 0x1) {
5431  Error(S, "expected first even register of a "
5432  "consecutive same-size even/odd register pair");
5433  return MatchOperand_ParseFail;
5434  }
5435 
5436  if (getParser().getTok().isNot(AsmToken::Comma)) {
5437  Error(getLoc(), "expected comma");
5438  return MatchOperand_ParseFail;
5439  }
5440  // Eat the comma
5441  getParser().Lex();
5442 
5443  SMLoc E = getLoc();
5444  unsigned SecondReg;
5445  Res = tryParseScalarRegister(SecondReg);
5446  if (Res != MatchOperand_Success)
5447  return MatchOperand_ParseFail;
5448 
5449  if (RI->